diff --git a/.circleci/.gitattributes b/.circleci/.gitattributes
deleted file mode 100644
index 2dd06ee5f7cd..000000000000
--- a/.circleci/.gitattributes
+++ /dev/null
@@ -1 +0,0 @@
-config.yml linguist-generated
diff --git a/.circleci/.gitignore b/.circleci/.gitignore
deleted file mode 100644
index 3018b3a68132..000000000000
--- a/.circleci/.gitignore
+++ /dev/null
@@ -1 +0,0 @@
-.tmp/
diff --git a/.circleci/Makefile b/.circleci/Makefile
deleted file mode 100644
index dc75ea5f1f19..000000000000
--- a/.circleci/Makefile
+++ /dev/null
@@ -1,100 +0,0 @@
-# Set SHELL to 'strict mode' without using .SHELLFLAGS for max compatibility.
-# See https://fieldnotes.tech/how-to-shell-for-compatible-makefiles/
-SHELL := /usr/bin/env bash -euo pipefail -c
-
-# CONFIG is the name of the make target someone
-# would invoke to update the main config file (config.yml).
-CONFIG ?= ci-config
-# VERIFY is the name of the make target someone
-# would invoke to verify the config file.
-VERIFY ?= ci-verify
-
-CIRCLECI := circleci --skip-update-check
-ifeq ($(DEBUG_CIRCLECI_CLI),YES)
-CIRCLECI += --debug
-endif
-
-# For config processing, always refer to circleci.com not self-hosted circleci,
-# because self-hosted does not currently support the necessary API.
-CIRCLECI_CLI_HOST := https://circleci.com
-export CIRCLECI_CLI_HOST
-
-# Set up some documentation/help message variables.
-# We do not attempt to install the CircleCI CLI from this Makefile.
-CCI_INSTALL_LINK := https://circleci.com/docs/2.0/local-cli/\#installation
-CCI_INSTALL_MSG := Please install CircleCI CLI. See $(CCI_INSTALL_LINK)
-CCI_VERSION := $(shell $(CIRCLECI) version 2> /dev/null)
-ifeq ($(CCI_VERSION),)
-# Attempting to use the CLI fails with installation instructions.
-CIRCLECI := echo '$(CCI_INSTALL_MSG)'; exit 1; \#
-endif
-
-SOURCE_DIR := config
-SOURCE_YML := $(shell [ ! -d $(SOURCE_DIR) ] || find $(SOURCE_DIR) -name '*.yml')
-CONFIG_SOURCE := Makefile $(SOURCE_YML) | $(SOURCE_DIR)
-OUT := config.yml
-TMP := .tmp/config-processed
-CONFIG_PACKED := .tmp/config-packed
-GO_VERSION_FILE := ../.go-version
-GO_VERSION := $(shell cat $(GO_VERSION_FILE))
-
-default: help
-
-help:
-	@echo "Usage:"
-	@echo "  make $(CONFIG): recompile config.yml from $(SOURCE_DIR)/"
-	@echo "  make $(VERIFY): verify that config.yml is a true mapping from $(SOURCE_DIR)/"
-	@echo
-	@echo "Diagnostics:"
-	@[ -z "$(CCI_VERSION)" ] || echo "  circleci-cli version $(CCI_VERSION)"
-	@[ -n "$(CCI_VERSION)" ] || echo "  $(CCI_INSTALL_MSG)"
-
-$(SOURCE_DIR):
-	@echo No source directory $(SOURCE_DIR) found.; exit 1
-
-# Make sure our .tmp dir exists.
-$(shell [ -d .tmp ] || mkdir .tmp)
-
-.PHONY: $(CONFIG)
-$(CONFIG): $(OUT) $(GO_VERSION_FILE)
-
-.PHONY: $(VERIFY)
-$(VERIFY): config-up-to-date
-	@$(CIRCLECI) config validate $(OUT)
-
-define GENERATED_FILE_HEADER
-### ***
-### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make $(CONFIG)'.
-### INSTEAD: Edit or merge the source in $(SOURCE_DIR)/ then run 'make $(CONFIG)'.
-### ***
-endef
-export GENERATED_FILE_HEADER
-
-# GEN_CONFIG writes the config to a temporary file. If the whole process succeeds,
-# it then moves that file to $@. This makes it an atomic operation, so if it fails
-# make doesn't consider a half-baked file up to date.
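Editor's note: the comment above refers to the standard write-to-temp-then-rename idiom. A minimal standalone sketch of the same pattern in plain shell, where `generate-config` is a hypothetical stand-in for the pack/process pipeline defined just below:

```sh
#!/usr/bin/env bash
set -euo pipefail

out=config.yml

# Write to a sibling temp file, then rename into place. A rename on the
# same filesystem is atomic, so a failed generation can never leave a
# half-written config.yml behind that make would treat as up to date.
generate-config > "${out}.tmp" || { rm -f "${out}.tmp"; exit 1; }
mv -f "${out}.tmp" "${out}"
```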
-define GEN_CONFIG
-
-	@yq -i ".references.environment.GO_IMAGE = \"docker.mirror.hashicorp.services/cimg/go:$(GO_VERSION)\"" $(SOURCE_DIR)/executors/\@executors.yml
-
-	@$(CIRCLECI) config pack $(SOURCE_DIR) > $(CONFIG_PACKED)
-	@echo "$$GENERATED_FILE_HEADER" > $@.tmp || { rm -f $@; exit 1; }
-	@$(CIRCLECI) config process $(CONFIG_PACKED) >> $@.tmp || { rm -f $@.tmp; exit 1; }
-	@mv -f $@.tmp $@
-endef
-
-.PHONY: $(OUT)
-$(OUT): $(CONFIG_SOURCE)
-	$(GEN_CONFIG)
-	@echo "$@ updated"
-
-$(TMP): $(CONFIG_SOURCE)
-	$(GEN_CONFIG)
-
-.PHONY: config-up-to-date
-config-up-to-date: $(TMP) # Note this must not depend on $(OUT)!
-	@if diff -w $(OUT) $<; then \
-		echo "Generated $(OUT) is up to date!"; \
-	else \
-		echo "Generated $(OUT) is out of date, run make $(CONFIG) to update."; \
-		exit 1; \
-	fi
diff --git a/.circleci/README.md b/.circleci/README.md
deleted file mode 100644
index 1ec75cafade9..000000000000
--- a/.circleci/README.md
+++ /dev/null
@@ -1,130 +0,0 @@
-# How to use CircleCI multi-file config
-
-This README and the Makefile should be in your `.circleci` directory,
-in the root of your repository.
-All path references in this README assume we are in this `.circleci` directory.
-
-The `Makefile` in this directory generates `./config.yml` in CircleCI 2.0 syntax,
-from the tree rooted at `./config/`, which contains files in CircleCI 2.0 or 2.1 syntax.
-
-
-## Quickstart
-
-The basic workflow is:
-
-- Edit source files in `./config/`
-- When you are done, run `make ci-config` to update `./config.yml`
-- Commit this entire `.circleci` directory, including the generated file, together
-- Run `make ci-verify` to ensure the current `./config.yml` is up to date with the source
-
-When merging this `.circleci` directory:
-
-- Do not merge the generated `./config.yml` file; instead:
-- Merge the source files under `./config/`, and then
-- Run `make ci-config` to re-generate the merged `./config.yml`
-
-And that's it! For more detail, read on.
-
-
-## How does it work, roughly?
-
-CircleCI supports [generating a single config file from many],
-using the `$ circleci config pack` command.
-It also supports [expanding 2.1 syntax to 2.0 syntax]
-using the `$ circleci config process` command.
-We use these two commands, stitched together by the `Makefile`,
-to implement the workflow.
-
-[generating a single config file from many]: https://circleci.com/docs/2.0/local-cli/#packing-a-config
-[expanding 2.1 syntax to 2.0 syntax]: https://circleci.com/docs/2.0/local-cli/#processing-a-config
-
-
-## Prerequisites
-
-You will need the [CircleCI CLI tool] installed and working,
-at least version `0.1.5607`.
-You can [download this tool directly from GitHub Releases].
-
-```
-$ circleci version
-0.1.5607+f705856
-```
-
-[CircleCI CLI tool]: https://circleci.com/docs/2.0/local-cli/
-[download this tool directly from GitHub Releases]: https://github.com/CircleCI-Public/circleci-cli/releases
-
-
-## Updating the config source
-
-Before making changes, be sure to understand the layout
-of the `./config/` file tree, as well as CircleCI 2.1 syntax.
-See the [Syntax and layout] section below.
-
-To update the config, edit, add, or remove files
-in the `./config/` directory,
-and then run `make ci-config`.
-If that's successful,
-you should then commit every `*.yml` file in the tree rooted in this directory.
-That is: commit both the source under `./config/`
-and the generated file `./config.yml` at the same time, in the same commit.
-The included git pre-commit hook will help with this.
-
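Editor's note: concretely, the two CLI commands described above combine as follows. This is a minimal sketch of what `make ci-config` runs, assuming the CLI is installed and the source tree lives in `config/` (paths mirror the Makefile's `SOURCE_DIR` and `CONFIG_PACKED` variables; the real recipe also prepends the warning header and writes through a temp file so the update is atomic):

```sh
circleci config pack config > .tmp/config-packed         # merge the config/ tree into one 2.1 document
circleci config process .tmp/config-packed > config.yml  # expand 2.1 syntax down to 2.0
circleci config validate config.yml                      # what 'make ci-verify' runs at the end
```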
-Do not edit the `./config.yml` file directly, as you will lose your changes
-next time `make ci-config` is run.
-
-[Syntax and layout]: #syntax-and-layout
-
-
-### Verifying `./config.yml`
-
-To check whether the current `./config.yml` is up to date with the source
-and valid, run `$ make ci-verify`.
-Note that `$ make ci-verify` should be run in CI,
-in case not everyone has the git pre-commit hook set up correctly.
-
-
-#### Example shell session
-
-```sh
-$ make ci-config
-config.yml updated
-$ git add -A .  # The -A makes sure to include deletions/renames etc.
-$ git commit -m "ci: blah blah blah"
-Changes detected in .circleci/, running 'make -C .circleci ci-verify'
---> Generated config.yml is up to date!
---> Config file at config.yml is valid.
-```
-
-
-### Syntax and layout
-
-It is important to understand the layout of the config directory.
-Read the documentation on [packing a config] for a full understanding
-of how multiple YAML files are merged by the circleci CLI tool.
-
-[packing a config]: https://circleci.com/docs/2.0/local-cli/#packing-a-config
-
-Here is an example file tree (with comments added afterwards):
-
-```sh
-$ tree .
-.
-├── Makefile
-├── README.md            # This file.
-├── config               # The source code for config.yml is rooted here.
-│   ├── @config.yml      # Files beginning with @ are treated specially by `circleci config pack`
-│   ├── commands         # Subdirectories of config become top-level keys.
-│   │   ├── go_test.yml  # Filenames (minus .yml) become top-level keys under
-│   │   └── go_build.yml # their parent (in this case "commands").
-│   │                    # The contents of go_test.yml therefore are placed at: .commands.go_test:
-│   └── jobs             # jobs also becomes a top-level key under config...
-│       ├── build.yml    # ...and likewise filenames become keys under their parent.
-│       └── test.yml
-└── config.yml           # The generated file in 2.0 syntax.
-```
-
-About those `@` files... Preceding a filename with `@`
-indicates to `$ circleci config pack` that the contents of this YAML file
-should be at the top level, rather than underneath a key named after their filename.
-This naming convention is unfortunate as it breaks autocompletion in bash,
-but there we go.
-
diff --git a/.circleci/config.yml b/.circleci/config.yml
deleted file mode 100644
index bd21ec984863..000000000000
--- a/.circleci/config.yml
+++ /dev/null
@@ -1,1216 +0,0 @@
-### ***
-### WARNING: DO NOT manually EDIT or MERGE this file, it is generated by 'make ci-config'.
-### INSTEAD: Edit or merge the source in config/ then run 'make ci-config'.
-### *** -# Orb 'circleci/slack@3.2.0' resolved to 'circleci/slack@3.2.0' -version: 2 -jobs: - install-ui-dependencies: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - run: - command: | - cd ui - yarn install - npm rebuild node-sass - name: Install UI dependencies - - save_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Save yarn cache - paths: - - ui/node_modules - test-ui: - docker: - - environment: - JOBS: 2 - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - resource_class: xlarge - steps: - - run: - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job. - circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - restore_cache: - key: yarn-lock-v7-{{ checksum "ui/yarn.lock" }} - name: Restore yarn cache - - attach_workspace: - at: . - - run: - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - name: Test UI - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results - build-go-dev: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=off" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - attach_workspace: - at: . - - run: - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - name: Build dev binary - - persist_to_workspace: - paths: - - bin - root: . 
- environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. 
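Editor's note: the splitting logic above is dense, so here is a distilled sketch of its docker-dependent branch, under the assumption that `jq` and the `circleci` CLI are on the PATH (both are installed earlier in the job):

```sh
# List every test package as JSON, keep only packages whose transitive deps
# include the docker test helpers, then let 'circleci tests split' hand this
# container a timing-balanced subset. The non-docker branch is identical but
# wraps the contains() check in all(... | not).
go list -test -json ./... > test-list.json
jq -r 'select(.Deps != null)
       | select(any(.Deps[]; contains("github.com/hashicorp/vault/helper/testhelpers/docker")))
       | .ForTest | select(. != null)' < test-list.json \
  | sort -u | grep -v vault/integ \
  | circleci tests split --split-by=timings --timings-type=classname
```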
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - fmt: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. 
Run 'make fmt' to fix" - exit 1 - fi - name: make fmt - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20 - resource_class: xlarge - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. 
- - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20 - resource_class: large - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. 
- # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. 
- # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - attach_workspace: - at: . - - run: - command: "# Alpine images can't run the make file due to a bash requirement. Run\n# semgrep explicitly here. 
\nexport PATH=\"$HOME/.local/bin:$PATH\" \necho -n 'Semgrep Version: '\nsemgrep --version\nsemgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci .\n" - name: Run Semgrep Rules - pre-flight-checks: - machine: - image: ubuntu-2004:2022.10.1 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - steps: - - checkout - - run: - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=https://proxy.golang.org,direct" >> "$BASH_ENV" - echo "export GOPRIVATE=github.com/hashicorp/*" >> "$BASH_ENV" - - echo "$ go version" - go version - name: Setup Go - - run: - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - name: Install CircleCI CLI - - run: - command: | - set -x - . $BASH_ENV - make ci-verify - name: Verify CircleCI - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} - name: Restore closest matching go modules cache - - run: - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - name: go mod download - - run: - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above." 
- exit 1 - } - name: Verify downloading modules did not modify any files - - save_cache: - key: v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Save go modules cache - paths: - - /home/circleci/go/pkg/mod - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 - test-go-race-remote-docker: - docker: - - image: docker.mirror.hashicorp.services/cimg/go:1.20 - resource_class: medium - working_directory: /home/circleci/go/src/github.com/hashicorp/vault - parallelism: 8 - steps: - - run: - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 - name: Check branch name - working_directory: ~/ - - checkout - - setup_remote_docker: - docker_layer_caching: true - version: 20.10.17 - - add_ssh_keys: - fingerprints: - - b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9 - - run: - command: | - git config --global url."git@github.com:".insteadOf https://github.com/ - - run: - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - name: Compute test cache key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_cache: - keys: - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - name: Restore exact go modules cache - - run: - command: | - set -exo pipefail - - EXTRA_TAGS= - case "-race" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 - USE_DOCKER=1 - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. 
!= null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear. - export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=/tmp/testlogs \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. 
- if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d /tmp/go-cache && docker cp /tmp/go-cache ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=amd64 \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - else - GOARCH=amd64 \ - GOCACHE=/tmp/go-cache \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - -race \ - ${all_package_names} - fi - environment: - GOPRIVATE: github.com/hashicorp/* - name: Run Go tests - no_output_timeout: 60m - - run: - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . 
- docker cp $(cat workspace/container_id):/tmp/gocache /tmp/go-cache - name: Copy test results - when: always - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: /tmp/testlogs - environment: - - CIRCLECI_CLI_VERSION: 0.1.5546 - - GO_IMAGE: docker.mirror.hashicorp.services/cimg/go:1.20 - - GO_TAGS: '' - - GOFUMPT_VERSION: 0.3.1 - - GOTESTSUM_VERSION: 0.5.2 -workflows: - ci: - jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - - test-go: - requires: - - pre-flight-checks - - test-go-remote-docker: - requires: - - pre-flight-checks - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks - version: 2 diff --git a/.circleci/config/@config.yml b/.circleci/config/@config.yml deleted file mode 100644 index 38fbc6831210..000000000000 --- a/.circleci/config/@config.yml +++ /dev/null @@ -1,5 +0,0 @@ ---- -version: 2.1 - -orbs: - slack: circleci/slack@3.2.0 diff --git a/.circleci/config/commands/@caches.yml b/.circleci/config/commands/@caches.yml deleted file mode 100644 index 7ce217f074d6..000000000000 --- a/.circleci/config/commands/@caches.yml +++ /dev/null @@ -1,59 +0,0 @@ -restore_yarn_cache: - steps: - - restore_cache: - name: Restore yarn cache - key: &YARN_LOCK_CACHE_KEY yarn-lock-v7-{{ checksum "ui/yarn.lock" }} -save_yarn_cache: - steps: - - save_cache: - name: Save yarn cache - key: *YARN_LOCK_CACHE_KEY - paths: - - ui/node_modules -# allows restoring go mod caches by incomplete prefix. This is useful when re-generating -# cache, but not when running builds and tests that require an exact match. -# TODO should we be including arch in cache key? -restore_go_mod_cache_permissive: - steps: - - restore_cache: - name: Restore closest matching go modules cache - keys: - - &gocachekey v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}}-{{checksum "api/go.sum"}} - - v1.5-{{checksum "go.sum"}}-{{checksum "sdk/go.sum"}} - - v1.5-{{checksum "go.sum"}} -restore_go_mod_cache: - steps: - - restore_cache: - name: Restore exact go modules cache - keys: - - *gocachekey -save_go_mod_cache: - steps: - - save_cache: - name: Save go modules cache - key: *gocachekey - paths: - - /home/circleci/go/pkg/mod -refresh_go_mod_cache: - steps: - - restore_go_mod_cache_permissive - - run: - name: go mod download - command: | - # set GOPATH explicitly to download to the right cache - export GOPATH=$HOME/go - # go list ./... forces downloading some additional versions of modules that 'go mod - # download' misses. We need this because we make use of go list itself during - # code generation in later builds that rely on this module cache. - go list ./... - go mod download -json - ( cd sdk && go mod download -json; ) - ( cd api && go mod download -json; ) - - run: - name: Verify downloading modules did not modify any files - command: | - git --no-pager diff --exit-code || { - echo "ERROR: Files modified by go mod download, see above." 
- exit 1 - } - - save_go_mod_cache diff --git a/.circleci/config/commands/configure-git.yml b/.circleci/config/commands/configure-git.yml deleted file mode 100644 index a725ab97e7b9..000000000000 --- a/.circleci/config/commands/configure-git.yml +++ /dev/null @@ -1,7 +0,0 @@ -steps: - - add_ssh_keys: - fingerprints: - # "CircleCI Additional SSH Key" associated with hc-github-team-secure-vault-core GitHub user - - "b8:e2:38:f8:5b:1b:82:f3:1f:23:fa:46:6e:95:e7:e9" - - run: | - git config --global url."git@github.com:".insteadOf https://github.com/ diff --git a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml b/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml deleted file mode 100644 index 771ef4d925f8..000000000000 --- a/.circleci/config/commands/exit-if-branch-does-not-need-test-ui.yml +++ /dev/null @@ -1,17 +0,0 @@ -description: > - Check if branch name starts with ui/ or docs/ and if so, exit. -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - case "$CIRCLE_BRANCH" in - main|ui/*|backport/ui/*|release/*|merge*) ;; - *) # If the branch being tested doesn't match one of the above patterns, - # we don't need to run test-ui and can abort the job. - circleci-agent step halt - ;; - esac - - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml b/.circleci/config/commands/exit-if-ui-or-docs-branch.yml deleted file mode 100644 index 322091f70ba7..000000000000 --- a/.circleci/config/commands/exit-if-ui-or-docs-branch.yml +++ /dev/null @@ -1,14 +0,0 @@ -description: > - Check if branch name starts with ui/ or docs/ and if so, exit. -steps: - - run: - working_directory: ~/ - name: Check branch name - command: | - # If the branch being tested starts with ui/ or docs/ we want to exit the job without failing - [[ "$CIRCLE_BRANCH" = ui/* || "$CIRCLE_BRANCH" = docs/* || "$CIRCLE_BRANCH" = backport/docs/* ]] && { - # stop the job from this step - circleci-agent step halt - } - # exit with success either way - exit 0 diff --git a/.circleci/config/commands/go_test.yml b/.circleci/config/commands/go_test.yml deleted file mode 100644 index 9e4b4daa9da4..000000000000 --- a/.circleci/config/commands/go_test.yml +++ /dev/null @@ -1,226 +0,0 @@ -description: run go tests -parameters: - extra_flags: - type: string - default: "" - log_dir: - type: string - default: "/tmp/testlogs" - cache_dir: - type: string - default: /tmp/go-cache - save_cache: - type: boolean - default: false - use_docker: - type: boolean - default: false - arch: - type: string - # Only supported for use_docker=false, and only other value allowed is 386 - default: amd64 # must be 386 or amd64 -steps: - - configure-git - - run: - name: Compute test cache key - command: | - TZ=GMT date '+%Y%m%d' > /tmp/go-cache-key - - restore_cache: - keys: - - go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - - restore_go_mod_cache - - run: - name: Run Go tests - no_output_timeout: 60m - environment: - GOPRIVATE: 'github.com/hashicorp/*' - command: | - set -exo pipefail - - EXTRA_TAGS= - case "<< parameters.extra_flags >>" in - *-race*) export VAULT_CI_GO_TEST_RACE=1;; - *) EXTRA_TAGS=deadlock;; - esac - - # Install CircleCI CLI - curl -sSL \ - "https://github.com/CircleCI-Public/circleci-cli/releases/download/v${CIRCLECI_CLI_VERSION}/circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64.tar.gz" \ - | sudo tar --overwrite -xz \ - -C /usr/local/bin \ - "circleci-cli_${CIRCLECI_CLI_VERSION}_linux_amd64/circleci" - - USE_DOCKER=0 
- <<# parameters.use_docker >> - USE_DOCKER=1 - <</ parameters.use_docker >> - - # Check all directories with a go.mod file - modules=("." "api" "sdk") - all_package_names="" - - for dir in "${modules[@]}" - do - pushd "$dir" - # On its own line so that -e will fail the tests if we detect errors here. - go list -test -json ./... > test-list.json - # Split Go tests by prior test times. If use_docker is true, only run - # tests that depend on docker, otherwise only those that don't. - # The appended true condition ensures the command will succeed if no packages are found - if [ $USE_DOCKER == 1 ]; then - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(any(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker"))) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - else - package_names=$(< test-list.json jq -r 'select(.Deps != null) | - select(all(.Deps[] ; contains("github.com/hashicorp/vault/helper/testhelpers/docker")|not)) | - .ForTest | select(. != null)' | - sort -u | grep -v vault/integ | circleci tests split --split-by=timings --timings-type=classname || true) - fi - # Move back into root directory - popd - # Append the test packages into the global list, if any are found - if [ -n "$package_names" ]; then - all_package_names+=" ${package_names}" - fi - done - - # After running tests split step, we are now running the following steps - # in multiple different containers, each getting a different subset of - # the test packages in their package_names variable. Each container - # has its own remote docker VM. - - make prep - - mkdir -p test-results/go-test - - # We don't want VAULT_LICENSE set when running Go tests, because that's - # not what developers have in their environments and it could break some - # tests; it would be like setting VAULT_TOKEN. However some non-Go - # CI commands, like the UI tests, shouldn't have to worry about licensing. - # So we set VAULT_LICENSE in CI, and here we unset it. Instead of - # VAULT_LICENSE, we populate VAULT_LICENSE_CI, so that tests which want - # an externally supplied license can opt-in to using it. - export VAULT_LICENSE_CI="$VAULT_LICENSE" - VAULT_LICENSE= - - # Create a docker network for our test container - if [ $USE_DOCKER == 1 ]; then - # Despite the fact that we're using a circleci image (thus getting the - # version they chose for the docker cli) and that we're specifying a - # docker version to use for the remote docker instances, we occasionally - # see "client version too new, max supported version 1.39" errors for - # reasons unclear.
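# Pinning the client-side API version below keeps the docker CLI negotiating
# at a level the remote docker daemon is known to accept.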
- export DOCKER_API_VERSION=1.39 - - TEST_DOCKER_NETWORK_NAME="${CIRCLE_WORKFLOW_JOB_ID}-${CIRCLE_NODE_INDEX}" - export TEST_DOCKER_NETWORK_ID=$(docker network list --quiet --no-trunc --filter="name=${TEST_DOCKER_NETWORK_NAME}") - if [ -z $TEST_DOCKER_NETWORK_ID ]; then - docker network prune -f - TEST_DOCKER_NETWORK_ID=$(docker network create "${TEST_DOCKER_NETWORK_NAME}") - fi - - - - # Start a docker test container to run the tests in - CONTAINER_ID="$(docker run -d \ - -e TEST_DOCKER_NETWORK_ID \ - -e GOPRIVATE \ - -e DOCKER_CERT_PATH \ - -e DOCKER_HOST \ - -e DOCKER_MACHINE_NAME \ - -e DOCKER_TLS_VERIFY \ - -e NO_PROXY \ - -e VAULT_TEST_LOG_DIR=<< parameters.log_dir >> \ - --network ${TEST_DOCKER_NETWORK_NAME} \ - $GO_IMAGE \ - tail -f /dev/null)" - mkdir workspace - echo ${CONTAINER_ID} > workspace/container_id - - # Hack: Docker permissions appear to have changed; let's explicitly - # add a new user/group with the correct host uid to the docker - # container, fixing all of these permissions issues correctly. We - # then have to run with this user consistently in the future. - # - # Notably, in this shell pipeline we see: - # uid=1001(circleci) gid=1002(circleci) groups=1002(circleci) - # - # but inside the docker image below, we see: - # uid=3434(circleci) gid=3434(circleci) groups=3434(circleci) - # - # See also: https://github.com/CircleCI-Public/cimg-base/issues/122 - export HOST_GID="$(id -g)" - export HOST_UID="$(id -u)" - export CONT_GID="$(docker exec ${CONTAINER_ID} sh -c 'id -g')" - export CONT_GNAME="$(docker exec ${CONTAINER_ID} sh -c 'id -g -n')" - export CONT_UID="$(docker exec ${CONTAINER_ID} sh -c 'id -u')" - if (( HOST_UID != CONT_UID )); then - # Only provision a group if necessary; otherwise reuse the - # existing one. - if (( HOST_GID != CONT_GID )); then - docker exec -e HOST_GID -e CONT_GNAME ${CONTAINER_ID} sh -c 'sudo groupmod -g $HOST_GID $CONT_GNAME' - fi - - docker exec -e CONT_GNAME -e HOST_UID ${CONTAINER_ID} sh -c 'sudo usermod -a -G $CONT_GNAME -u $HOST_UID circleci' - fi - - # Run tests - test -d << parameters.cache_dir >> && docker cp << parameters.cache_dir >> ${CONTAINER_ID}:/tmp/gocache - docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/src/github.com/hashicorp/vault' - docker cp . ${CONTAINER_ID}:/home/circleci/go/src/github.com/hashicorp/vault/ - docker cp $DOCKER_CERT_PATH/ ${CONTAINER_ID}:$DOCKER_CERT_PATH - - # Copy the downloaded modules inside the container. 
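# Note that the docker exec below runs with GOPROXY=off, so the tests can
# resolve modules only from this pre-seeded copy of the host's module cache.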
- docker exec ${CONTAINER_ID} sh -c 'mkdir -p /home/circleci/go/pkg' - docker cp "$(go env GOPATH)/pkg/mod" ${CONTAINER_ID}:/home/circleci/go/pkg/mod - - docker exec -w /home/circleci/go/src/github.com/hashicorp/vault/ \ - -e CIRCLECI -e VAULT_CI_GO_TEST_RACE \ - -e GOCACHE=/tmp/gocache \ - -e GO_TAGS \ - -e GOPROXY="off" \ - -e VAULT_LICENSE_CI \ - -e GOARCH=<< parameters.arch >> \ - ${CONTAINER_ID} \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - else - GOARCH=<< parameters.arch >> \ - GOCACHE=<< parameters.cache_dir >> \ - gotestsum --format=short-verbose \ - --junitfile test-results/go-test/results.xml \ - --jsonfile test-results/go-test/results.json \ - -- \ - -tags "${GO_TAGS} ${EXTRA_TAGS}" \ - -timeout=60m \ - -parallel=20 \ - << parameters.extra_flags >> \ - ${all_package_names} - fi - - - when: - condition: << parameters.use_docker >> - steps: - - run: - name: Copy test results - when: always - command: | - docker cp $(cat workspace/container_id):/home/circleci/go/src/github.com/hashicorp/vault/test-results . - docker cp $(cat workspace/container_id):/tmp/gocache << parameters.cache_dir >> - - when: - condition: << parameters.save_cache >> - steps: - - save_cache: - when: always - key: go-test-cache-date-v1-{{ checksum "/tmp/go-cache-key" }} - paths: - - << parameters.cache_dir >> diff --git a/.circleci/config/commands/setup-go.yml b/.circleci/config/commands/setup-go.yml deleted file mode 100644 index 5aec0087e9da..000000000000 --- a/.circleci/config/commands/setup-go.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -description: > - Ensure the right version of Go is installed and set GOPATH to $HOME/go. -parameters: - GOPROXY: - description: > - Set GOPROXY. By default this is set to "off" meaning you have to have all modules pre-downloaded. 
- type: string - default: "off" - GOPRIVATE: - description: Set GOPRIVATE, defaults to github.com/hashicorp/* - type: string - default: github.com/hashicorp/* -steps: - - run: - name: Setup Go - command: | - GO_VERSION=$(cat .go-version) - [ -n "$GO_VERSION" ] || { echo "You must set GO_VERSION"; exit 1; } - # Install Go - cd ~ - curl -sSLO "https://dl.google.com/go/go${GO_VERSION}.linux-amd64.tar.gz" - sudo rm -rf /usr/local/go - sudo tar -C /usr/local -xzf "go${GO_VERSION}.linux-amd64.tar.gz" - rm -f "go${GO_VERSION}.linux-amd64.tar.gz" - GOPATH="/home/circleci/go" - mkdir $GOPATH 2>/dev/null || { sudo mkdir $GOPATH && sudo chmod 777 $GOPATH; } - mkdir $GOPATH/bin 2>/dev/null || { sudo mkdir $GOPATH/bin && sudo chmod 777 $GOPATH/bin; } - echo "export GOPATH='$GOPATH'" >> "$BASH_ENV" - echo "export PATH='$PATH:$GOPATH/bin:/usr/local/go/bin'" >> "$BASH_ENV" - echo "export GOPROXY=<< parameters.GOPROXY >>" >> "$BASH_ENV" - echo "export GOPRIVATE=<< parameters.GOPRIVATE >>" >> "$BASH_ENV" - - echo "$ go version" - go version diff --git a/.circleci/config/executors/@executors.yml b/.circleci/config/executors/@executors.yml deleted file mode 100644 index 3cba4dbbb85c..000000000000 --- a/.circleci/config/executors/@executors.yml +++ /dev/null @@ -1,49 +0,0 @@ -references: - environment: &ENVIRONMENT - CIRCLECI_CLI_VERSION: 0.1.5546 # Pin CircleCI CLI to patch version (ex: 1.2.3) - GOTESTSUM_VERSION: 0.5.2 # Pin gotestsum to patch version (ex: 1.2.3) - GOFUMPT_VERSION: 0.3.1 # Pin gofumpt to patch version (ex: 1.2.3) - GO_TAGS: "" - GO_IMAGE: &GO_IMAGE "docker.mirror.hashicorp.services/cimg/go:1.20" -go-machine: - machine: - image: ubuntu-2004:2022.10.1 - environment: *ENVIRONMENT - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -node: - docker: - - image: docker.mirror.hashicorp.services/circleci/node:14-browsers - environment: - # See https://git.io/vdao3 for details. - JOBS: 2 - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -python: - docker: - - image: docker.mirror.hashicorp.services/python:3-alpine - shell: /usr/bin/env bash -euo pipefail -c - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -semgrep: - docker: - - image: docker.mirror.hashicorp.services/returntocorp/semgrep:0.113.0 - shell: /bin/sh - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-remote-docker: - resource_class: medium - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test: - resource_class: large - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault -docker-env-go-test-race: - resource_class: xlarge - docker: - - image: *GO_IMAGE - environment: *ENVIRONMENT - working_directory: /home/circleci/go/src/github.com/hashicorp/vault diff --git a/.circleci/config/jobs/build-go-dev.yml b/.circleci/config/jobs/build-go-dev.yml deleted file mode 100644 index cce6d3f61edd..000000000000 --- a/.circleci/config/jobs/build-go-dev.yml +++ /dev/null @@ -1,20 +0,0 @@ -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - attach_workspace: - at: . - - run: - name: Build dev binary - command: | - # Move dev UI assets to expected location - rm -rf ./pkg - mkdir ./pkg - - # Build dev binary - make ci-bootstrap dev - - persist_to_workspace: - root: .
- paths: - - bin diff --git a/.circleci/config/jobs/fmt.yml b/.circleci/config/jobs/fmt.yml deleted file mode 100644 index 7d9a08dcebd1..000000000000 --- a/.circleci/config/jobs/fmt.yml +++ /dev/null @@ -1,17 +0,0 @@ -description: Ensure go formatting is correct. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: make fmt - command: | - echo "Using gofumpt version ${GOFUMPT_VERSION}" - go install "mvdan.cc/gofumpt@v${GOFUMPT_VERSION}" - make fmt - if ! git diff --exit-code; then - echo "Code has formatting errors. Run 'make fmt' to fix" - exit 1 - fi diff --git a/.circleci/config/jobs/install-ui-dependencies.yml b/.circleci/config/jobs/install-ui-dependencies.yml deleted file mode 100644 index 845e0c7770e2..000000000000 --- a/.circleci/config/jobs/install-ui-dependencies.yml +++ /dev/null @@ -1,11 +0,0 @@ -executor: node -steps: - - checkout - - restore_yarn_cache - - run: - name: Install UI dependencies - command: | - cd ui - yarn install - npm rebuild node-sass - - save_yarn_cache diff --git a/.circleci/config/jobs/pre-flight-checks.yml b/.circleci/config/jobs/pre-flight-checks.yml deleted file mode 100644 index 924b451b51d2..000000000000 --- a/.circleci/config/jobs/pre-flight-checks.yml +++ /dev/null @@ -1,34 +0,0 @@ -description: Ensure nothing obvious is broken, and pre-cache Go modules. -executor: go-machine -steps: - - checkout - # Setup Go enabling the proxy for downloading modules. - - setup-go: - GOPROXY: https://proxy.golang.org,direct - - run: - name: Install CircleCI CLI - environment: - ARCH: linux_amd64 - BASE: https://github.com/CircleCI-Public/circleci-cli/releases/download - command: | - export CCI_PATH=/tmp/circleci-cli/$CIRCLECI_CLI_VERSION - mkdir -p $CCI_PATH - NAME=circleci-cli_${CIRCLECI_CLI_VERSION}_${ARCH} - URL=$BASE/v${CIRCLECI_CLI_VERSION}/${NAME}.tar.gz - curl -sSL $URL \ - | tar --overwrite --strip-components=1 -xz -C $CCI_PATH "${NAME}/circleci" - # Add circleci to the path for subsequent steps. - echo "export PATH=$CCI_PATH:\$PATH" >> $BASH_ENV - # Done, print some debug info. - set -x - . $BASH_ENV - which circleci - circleci version - - run: - name: Verify CircleCI - command: | - set -x - . $BASH_ENV - make ci-verify - - configure-git - - refresh_go_mod_cache diff --git a/.circleci/config/jobs/semgrep.yml b/.circleci/config/jobs/semgrep.yml deleted file mode 100644 index c5cf749e129d..000000000000 --- a/.circleci/config/jobs/semgrep.yml +++ /dev/null @@ -1,15 +0,0 @@ ---- -executor: semgrep -steps: - - checkout - - attach_workspace: - at: . - - run: - name: Run Semgrep Rules - command: | - # Alpine images can't run the make file due to a bash requirement. Run - # semgrep explicitly here. - export PATH="$HOME/.local/bin:$PATH" - echo -n 'Semgrep Version: ' - semgrep --version - semgrep --error --include '*.go' --exclude 'vendor' -f tools/semgrep/ci . 
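Both pre-flight-checks above and setup-go earlier rely on the same CircleCI mechanism to hand state to later steps: anything appended to the file named by `$BASH_ENV` is sourced at the start of each subsequent `run` step. A minimal sketch of the pattern, assuming a hypothetical tool unpacked to `/tmp/mytool`:

```bash
# Persist a PATH change for all later steps in this job: CircleCI sources
# $BASH_ENV at the start of each subsequent `run` step.
echo "export PATH=/tmp/mytool:\$PATH" >> "$BASH_ENV"

# The current step does not re-read $BASH_ENV automatically; source it by
# hand if the tool is needed right away (as pre-flight-checks does above).
. "$BASH_ENV"
```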
diff --git a/.circleci/config/jobs/test-go-nightly.yml b/.circleci/config/jobs/test-go-nightly.yml deleted file mode 100644 index 502cdfa4e185..000000000000 --- a/.circleci/config/jobs/test-go-nightly.yml +++ /dev/null @@ -1,14 +0,0 @@ -executor: go-machine -steps: - - checkout - - setup-go - - restore_go_mod_cache - - go_test: - log_dir: "/tmp/testlogs" - save_cache: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race-remote-docker.yml b/.circleci/config/jobs/test-go-race-remote-docker.yml deleted file mode 100644 index 6780c60366e4..000000000000 --- a/.circleci/config/jobs/test-go-race-remote-docker.yml +++ /dev/null @@ -1,18 +0,0 @@ -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-race.yml b/.circleci/config/jobs/test-go-race.yml deleted file mode 100644 index fcda05e9ceda..000000000000 --- a/.circleci/config/jobs/test-go-race.yml +++ /dev/null @@ -1,14 +0,0 @@ -executor: docker-env-go-test-race -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - extra_flags: "-race" - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go-remote-docker.yml b/.circleci/config/jobs/test-go-remote-docker.yml deleted file mode 100644 index f51003f09445..000000000000 --- a/.circleci/config/jobs/test-go-remote-docker.yml +++ /dev/null @@ -1,17 +0,0 @@ -executor: docker-env-go-test-remote-docker -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - setup_remote_docker: - version: 20.10.17 - docker_layer_caching: true - - go_test: - log_dir: "/tmp/testlogs" - use_docker: true - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-go.yml b/.circleci/config/jobs/test-go.yml deleted file mode 100644 index c1674de870d2..000000000000 --- a/.circleci/config/jobs/test-go.yml +++ /dev/null @@ -1,13 +0,0 @@ -executor: docker-env-go-test -parallelism: 8 -steps: - - exit-if-ui-or-docs-branch - - checkout - - go_test: - log_dir: "/tmp/testlogs" - - store_artifacts: - path: test-results - - store_test_results: - path: test-results - - store_artifacts: - path: "/tmp/testlogs" diff --git a/.circleci/config/jobs/test-ui.yml b/.circleci/config/jobs/test-ui.yml deleted file mode 100644 index f2aa19b0508d..000000000000 --- a/.circleci/config/jobs/test-ui.yml +++ /dev/null @@ -1,22 +0,0 @@ -executor: node -resource_class: xlarge -steps: - - exit-if-branch-does-not-need-test-ui - - checkout - - restore_yarn_cache - - attach_workspace: - at: . 
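# The workspace attached above carries the ./bin directory persisted by the
# build-go-dev job, which is how the test run below can put a freshly built
# vault binary on the PATH for the Ember tests.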
- - run: - name: Test UI - command: | - # Add ./bin to the PATH so vault binary can be run by Ember tests - export PATH="${PWD}/bin:${PATH}" - - # Run Ember tests - cd ui - mkdir -p test-results/qunit - yarn test:oss - - store_artifacts: - path: ui/test-results - - store_test_results: - path: ui/test-results diff --git a/.circleci/config/workflows/ci.yml b/.circleci/config/workflows/ci.yml deleted file mode 100644 index 5e99293d7ea3..000000000000 --- a/.circleci/config/workflows/ci.yml +++ /dev/null @@ -1,35 +0,0 @@ -jobs: - - pre-flight-checks - - fmt - - install-ui-dependencies: - requires: - - pre-flight-checks - - build-go-dev: - requires: - - pre-flight-checks - - test-ui: - requires: - - install-ui-dependencies - - build-go-dev - # Only main, UI, release and merge branches need to run UI tests. - # We don't filter here however because test-ui is configured in github as - # required so it must run, instead we short-circuit within test-ui. - - test-go: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-remote-docker: - requires: - - pre-flight-checks - # We don't filter here because this is a required CI check; - # instead we short-circuit within the test command so it ends quickly. - - test-go-race: - requires: - - pre-flight-checks - - test-go-race-remote-docker: - requires: - - pre-flight-checks - - semgrep: - requires: - - pre-flight-checks diff --git a/.copywrite.hcl b/.copywrite.hcl index c779cce68070..e3f120ea6d97 100644 --- a/.copywrite.hcl +++ b/.copywrite.hcl @@ -10,5 +10,6 @@ project { header_ignore = [ "builtin/credentials/aws/pkcs7/**", "ui/node_modules/**", + "enos/modules/k8s_deploy_vault/raft-config.hcl", ] } diff --git a/.github/actionlint.yaml b/.github/actionlint.yaml new file mode 100644 index 000000000000..335ce6d5aab0 --- /dev/null +++ b/.github/actionlint.yaml @@ -0,0 +1,10 @@ +# Copyright (c) HashiCorp, Inc. 
+# SPDX-License-Identifier: MPL-2.0 + +self-hosted-runner: + # Labels of self-hosted runner in array of string + labels: + - small + - large + - ondemand + diff --git a/.github/dependabot.yml b/.github/dependabot.yml new file mode 100644 index 000000000000..81bae9acd600 --- /dev/null +++ b/.github/dependabot.yml @@ -0,0 +1,7 @@ +version: 2 + +updates: + - package-ecosystem: "github-actions" + directory: "/" + schedule: + interval: "daily" diff --git a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json index ab09a413bad3..80b3d55212b2 100644 --- a/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json +++ b/.github/enos-run-matrices/build-github-oss-linux-amd64-zip.json @@ -2,7 +2,7 @@ "include": [ { "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 3 }, { @@ -12,7 +12,7 @@ }, { "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 1 }, { @@ -22,7 +22,7 @@ }, { "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -32,7 +32,7 @@ }, { "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 5 }, { @@ -42,7 +42,7 @@ }, { "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { diff --git a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json b/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json index ec951fdd0a18..a497fb0ebe00 100644 --- a/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json +++ b/.github/enos-run-matrices/build-github-oss-linux-arm64-zip.json @@ -7,7 +7,7 @@ }, { "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -17,7 +17,7 @@ }, { "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 4 }, { @@ -27,7 +27,7 @@ }, { "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 1 }, { @@ -37,7 +37,7 @@ }, { "scenario": "upgrade backend:consul consul_version:1.12.7 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 3 }, { @@ -47,7 +47,7 @@ }, { "scenario": "upgrade backend:consul consul_version:1.14.2 distro:rhel seal:awskms arch:arm64 artifact_source:crt edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + 
"aws_region": "us-east-1", "test_group": 5 } ] diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json index 70e5ea1c3c24..857677b72f07 100644 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json +++ b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-amd64-zip.json @@ -2,7 +2,7 @@ "include": [ { "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -12,7 +12,7 @@ }, { "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -22,7 +22,7 @@ }, { "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -32,7 +32,7 @@ }, { "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -42,7 +42,7 @@ }, { "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { diff --git a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json index e6e9edb10f28..1c67cd3bcfdb 100644 --- a/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json +++ b/.github/enos-run-matrices/enos_release_testing_oss-artifactory-oss-linux-arm64-zip.json @@ -7,17 +7,17 @@ }, { "scenario": "smoke backend:raft consul_version:1.14.2 distro:ubuntu seal:awskms arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { "scenario": "smoke backend:consul consul_version:1.12.7 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 1 }, { "scenario": "smoke backend:consul consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -27,7 +27,7 @@ }, { "scenario": "upgrade backend:raft consul_version:1.14.2 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 2 }, { @@ -42,7 +42,7 @@ }, { "scenario": "upgrade backend:consul consul_version:1.13.4 distro:ubuntu seal:shamir arch:amd64 artifact_source:artifactory edition:oss artifact_type:bundle", - "aws_region": "us-west-1", + "aws_region": "us-east-1", "test_group": 1 }, { diff --git a/.github/scripts/generate-test-package-lists.sh b/.github/scripts/generate-test-package-lists.sh new file mode 
100755 index 000000000000..493a92c8c87c --- /dev/null +++ b/.github/scripts/generate-test-package-lists.sh @@ -0,0 +1,283 @@ +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +# This script is meant to be sourced into the shell running in a GitHub +# workflow. + +# This script is a temporary measure until we implement a dynamic test-splitting +# solution. It distributes the entire set of test packages into 16 sublists, +# each of which should take roughly the same amount of time to complete. + +test_packages=() + +base="github.com/hashicorp/vault" + +# Total time: 526 +test_packages[1]+=" $base/api" +test_packages[1]+=" $base/command" +test_packages[1]+=" $base/sdk/helper/keysutil" + +# Total time: 1160 +test_packages[2]+=" $base/sdk/helper/ocsp" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[2]+=" $base/vault/external_tests/replication-perf" +fi + +# Total time: 1009 +test_packages[3]+=" $base/builtin/credential/approle" +test_packages[3]+=" $base/command/agent/sink/file" +test_packages[3]+=" $base/command/agent/template" +test_packages[3]+=" $base/helper/random" +test_packages[3]+=" $base/helper/storagepacker" +test_packages[3]+=" $base/sdk/helper/certutil" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[3]+=" $base/vault/external_tests/entropy" +fi +test_packages[3]+=" $base/vault/external_tests/raft" + +# Total time: 830 +test_packages[4]+=" $base/builtin/plugin" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[4]+=" $base/enthelpers/fsm" +fi +test_packages[4]+=" $base/http" +test_packages[4]+=" $base/sdk/helper/pluginutil" +test_packages[4]+=" $base/serviceregistration/kubernetes" +test_packages[4]+=" $base/tools/godoctests/pkg/analyzer" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[4]+=" $base/vault/external_tests/apilock" + test_packages[4]+=" $base/vault/external_tests/filteredpaths" + test_packages[4]+=" $base/vault/external_tests/perfstandby" + test_packages[4]+=" $base/vault/external_tests/replication-dr" +fi + + +# Total time: 258 +test_packages[5]+=" $base/builtin/credential/aws" +test_packages[5]+=" $base/builtin/credential/cert" +test_packages[5]+=" $base/builtin/logical/aws" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[5]+=" $base/enthelpers/logshipper" + test_packages[5]+=" $base/enthelpers/merkle" +fi +test_packages[5]+=" $base/helper/hostutil" +test_packages[5]+=" $base/helper/pgpkeys" +test_packages[5]+=" $base/sdk/physical/inmem" +test_packages[5]+=" $base/vault/activity" +test_packages[5]+=" $base/vault/diagnose" +test_packages[5]+=" $base/vault/external_tests/pprof" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[5]+=" $base/vault/external_tests/resolver" +fi +test_packages[5]+=" $base/vault/external_tests/response" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[5]+=" $base/vault/external_tests/seal" +fi +test_packages[5]+=" $base/vault/external_tests/sealmigration" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[5]+=" $base/vault/external_tests/transform" +fi + +# Total time: 588 +test_packages[6]+=" $base" +test_packages[6]+=" $base/audit" +test_packages[6]+=" $base/builtin/audit/file" +test_packages[6]+=" $base/builtin/credential/github" +test_packages[6]+=" $base/builtin/credential/okta" +test_packages[6]+=" $base/builtin/logical/database/dbplugin" +test_packages[6]+=" $base/command/agent/auth/cert" +test_packages[6]+=" $base/command/agent/auth/jwt" +test_packages[6]+=" $base/command/agent/auth/kerberos" +test_packages[6]+=" $base/command/agent/auth/kubernetes"
+test_packages[6]+=" $base/command/agent/auth/token-file" +test_packages[6]+=" $base/command/agent/cache" +test_packages[6]+=" $base/command/agent/cache/cacheboltdb" +test_packages[6]+=" $base/command/agent/cache/cachememdb" +test_packages[6]+=" $base/command/agent/cache/keymanager" +test_packages[6]+=" $base/command/agent/config" +test_packages[6]+=" $base/command/config" +test_packages[6]+=" $base/command/token" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[6]+=" $base/enthelpers/namespace" + test_packages[6]+=" $base/enthelpers/replicatedpaths" + test_packages[6]+=" $base/enthelpers/sealrewrap" +fi +test_packages[6]+=" $base/helper/builtinplugins" +test_packages[6]+=" $base/helper/dhutil" +test_packages[6]+=" $base/helper/fairshare" +test_packages[6]+=" $base/helper/flag-kv" +test_packages[6]+=" $base/helper/flag-slice" +test_packages[6]+=" $base/helper/forwarding" +test_packages[6]+=" $base/helper/logging" +test_packages[6]+=" $base/helper/metricsutil" +test_packages[6]+=" $base/helper/namespace" +test_packages[6]+=" $base/helper/osutil" +test_packages[6]+=" $base/helper/parseip" +test_packages[6]+=" $base/helper/policies" +test_packages[6]+=" $base/helper/testhelpers/logical" +test_packages[6]+=" $base/helper/timeutil" +test_packages[6]+=" $base/helper/useragent" +test_packages[6]+=" $base/helper/versions" +test_packages[6]+=" $base/internalshared/configutil" +test_packages[6]+=" $base/internalshared/listenerutil" +test_packages[6]+=" $base/physical/alicloudoss" +test_packages[6]+=" $base/physical/gcs" +test_packages[6]+=" $base/physical/manta" +test_packages[6]+=" $base/physical/mssql" +test_packages[6]+=" $base/physical/oci" +test_packages[6]+=" $base/physical/s3" +test_packages[6]+=" $base/physical/spanner" +test_packages[6]+=" $base/physical/swift" +test_packages[6]+=" $base/physical/zookeeper" +test_packages[6]+=" $base/plugins/database/hana" +test_packages[6]+=" $base/plugins/database/redshift" +test_packages[6]+=" $base/sdk/database/dbplugin/v5" +test_packages[6]+=" $base/sdk/database/helper/credsutil" +test_packages[6]+=" $base/sdk/helper/authmetadata" +test_packages[6]+=" $base/sdk/helper/compressutil" +test_packages[6]+=" $base/sdk/helper/cryptoutil" +test_packages[6]+=" $base/sdk/helper/identitytpl" +test_packages[6]+=" $base/sdk/helper/kdf" +test_packages[6]+=" $base/sdk/helper/locksutil" +test_packages[6]+=" $base/sdk/helper/pathmanager" +test_packages[6]+=" $base/sdk/helper/roottoken" +test_packages[6]+=" $base/sdk/helper/testhelpers/schema" +test_packages[6]+=" $base/sdk/helper/xor" +test_packages[6]+=" $base/sdk/physical/file" +test_packages[6]+=" $base/sdk/plugin/pb" +test_packages[6]+=" $base/serviceregistration/kubernetes/client" +test_packages[6]+=" $base/shamir" +test_packages[6]+=" $base/vault/cluster" +test_packages[6]+=" $base/vault/eventbus" +test_packages[6]+=" $base/vault/external_tests/api" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[6]+=" $base/vault/external_tests/consistencyheaders" +fi +test_packages[6]+=" $base/vault/external_tests/expiration" +test_packages[6]+=" $base/vault/external_tests/hcp_link" +test_packages[6]+=" $base/vault/external_tests/kv" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[6]+=" $base/vault/external_tests/plugins" +fi +test_packages[6]+=" $base/vault/external_tests/quotas" +test_packages[6]+=" $base/vault/seal" + +# Total time: 389 +test_packages[7]+=" $base/builtin/credential/userpass" +test_packages[7]+=" $base/builtin/logical/pki" +test_packages[7]+=" $base/builtin/logical/transit" 
+test_packages[7]+=" $base/command/agent" +test_packages[7]+=" $base/helper/monitor" +test_packages[7]+=" $base/sdk/database/helper/connutil" +test_packages[7]+=" $base/sdk/database/helper/dbutil" +test_packages[7]+=" $base/sdk/helper/cidrutil" +test_packages[7]+=" $base/sdk/helper/custommetadata" +test_packages[7]+=" $base/sdk/helper/jsonutil" +test_packages[7]+=" $base/sdk/helper/ldaputil" +test_packages[7]+=" $base/sdk/helper/logging" +test_packages[7]+=" $base/sdk/helper/policyutil" +test_packages[7]+=" $base/sdk/helper/salt" +test_packages[7]+=" $base/sdk/helper/template" +test_packages[7]+=" $base/sdk/helper/useragent" +test_packages[7]+=" $base/sdk/logical" +test_packages[7]+=" $base/sdk/plugin/mock" +test_packages[7]+=" $base/sdk/queue" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[7]+=" $base/vault/autosnapshots" + test_packages[7]+=" $base/vault/external_tests/activity" +fi +test_packages[7]+=" $base/vault/external_tests/approle" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[7]+=" $base/vault/external_tests/kmip" +fi +test_packages[7]+=" $base/vault/external_tests/mfa" +test_packages[7]+=" $base/vault/external_tests/misc" +test_packages[7]+=" $base/vault/quotas" + +# Total time: 779 +test_packages[8]+=" $base/builtin/credential/aws/pkcs7" +test_packages[8]+=" $base/builtin/logical/totp" +test_packages[8]+=" $base/command/agent/auth" +test_packages[8]+=" $base/physical/raft" +test_packages[8]+=" $base/sdk/framework" +test_packages[8]+=" $base/sdk/plugin" +test_packages[8]+=" $base/vault" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[8]+=" $base/vault/external_tests/barrier" + test_packages[8]+=" $base/vault/external_tests/cubbyholes" +fi +test_packages[8]+=" $base/vault/external_tests/metrics" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[8]+=" $base/vault/external_tests/replication" +fi +test_packages[8]+=" $base/vault/external_tests/router" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[8]+=" $base/vault/external_tests/system" + test_packages[8]+=" $base/vault/managed_key" +fi + +# Total time: 310 +test_packages[9]+=" $base/vault/hcp_link/capabilities/api_capability" +test_packages[9]+=" $base/vault/external_tests/plugin" + +# Total time: 925 +test_packages[10]+=" $base/builtin/credential/ldap" +test_packages[10]+=" $base/builtin/logical/database" +test_packages[10]+=" $base/physical/etcd" +test_packages[10]+=" $base/physical/postgresql" + +# Total time: 851 +test_packages[11]+=" $base/builtin/logical/rabbitmq" +test_packages[11]+=" $base/physical/dynamodb" +test_packages[11]+=" $base/plugins/database/influxdb" +test_packages[11]+=" $base/vault/external_tests/identity" +test_packages[11]+=" $base/vault/external_tests/token" + +# Total time: 340 +test_packages[12]+=" $base/builtin/logical/consul" +test_packages[12]+=" $base/physical/couchdb" +test_packages[12]+=" $base/plugins/database/mongodb" +test_packages[12]+=" $base/plugins/database/mssql" +test_packages[12]+=" $base/plugins/database/mysql" + +# Total time: 704 +test_packages[13]+=" $base/builtin/logical/pkiext" +test_packages[13]+=" $base/command/server" +test_packages[13]+=" $base/physical/aerospike" +test_packages[13]+=" $base/physical/cockroachdb" +test_packages[13]+=" $base/plugins/database/postgresql" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[13]+=" $base/vault/external_tests/filteredpathsext" +fi +test_packages[13]+=" $base/vault/external_tests/policy" + +# Total time: 374 +test_packages[14]+=" $base/builtin/credential/radius" +test_packages[14]+=" 
$base/builtin/logical/ssh" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[14]+=" $base/enthelpers/wal" +fi +test_packages[14]+=" $base/physical/azure" +test_packages[14]+=" $base/serviceregistration/consul" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[14]+=" $base/vault/external_tests/quotas-docker" +fi +test_packages[14]+=" $base/vault/external_tests/raftha" + +# Total time: 362 +test_packages[15]+=" $base/builtin/logical/nomad" +test_packages[15]+=" $base/physical/mysql" +test_packages[15]+=" $base/plugins/database/cassandra" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[15]+=" $base/vault/external_tests/namespaces" +fi +test_packages[15]+=" $base/vault/external_tests/sealmigrationext" + +# Total time: 635 +test_packages[16]+=" $base/physical/cassandra" +test_packages[16]+=" $base/physical/consul" +if [ "${ENTERPRISE:+x}" == "x" ] ; then + test_packages[16]+=" $base/vault/external_tests/autosnapshots" + test_packages[16]+=" $base/vault/external_tests/replicationext" + test_packages[16]+=" $base/vault/external_tests/sealext" +fi diff --git a/.github/scripts/test-generate-test-package-lists.sh b/.github/scripts/test-generate-test-package-lists.sh new file mode 100755 index 000000000000..c3d1cb60670b --- /dev/null +++ b/.github/scripts/test-generate-test-package-lists.sh @@ -0,0 +1,75 @@ +#!/bin/bash +# Copyright (c) HashiCorp, Inc. +# SPDX-License-Identifier: MPL-2.0 + +set -e${DEBUG+x}o pipefail + +# +# This script is run to make sure that every package returned by +# go list -test ./... (when run from the repo root, api/, and sdk/ directory) +# appear in the test_packages array defined in the sibling file +# generate-test-package-lists.sh +# +# This script is executed as part of the ci workflow triggered by pull_requests +# events. In the event that the job that runs this script fails, examine the +# output of the 'test' step in that job to obtain the list of test packages that +# are missing in the test_packages array or that should be removed from it. +# + +dir=$( cd -- "$( dirname -- "${BASH_SOURCE[0]}" )" &> /dev/null && pwd ) + +source generate-test-package-lists.sh + +get_module_packages() { + local package_list=($(go list -test -json ./... | jq -r '.ForTest | select(.!=null)' | grep -v vault/integ | grep '^github.com/hashicorp/')) + + for package in "${package_list[@]}" ; do + # Check if the current package already exists in all_packages + if ! grep "\b$package\b" <<< "${all_packages[@]}" &> /dev/null; then + all_packages+=($package) + fi + done +} + +find_packages() { + for package in "${all_packages[@]}" ; do + if ! grep "\b${package}\b" <<< "${test_packages[@]}" &> /dev/null ; then + echo "Error: package ${package} is not present in test_packages" + exit 1 + fi + done +} + +count_test_packages() { + count=0 + for test_package in "${test_packages[@]}" ; do + count=$((${count}+$(wc -w <<< "${test_package}"))) + done + + echo $count +} + +all_packages=() + +cd "$dir/../.." 
+get_module_packages + +cd "$dir/../../sdk" +get_module_packages + +cd "$dir/../../api" +get_module_packages + +find_packages + +test_package_count=$(count_test_packages) +if (( ${#all_packages[@]} != $test_package_count )) ; then + echo "Error: there are currently ${#all_packages[@]} packages in the repository but $test_package_count packages in test_packages" + + unused_packages="${test_packages[@]} " + for ap in ${all_packages[@]} ; do + unused_packages="$(echo "$unused_packages" | sed -r "s~$ap ~ ~" )" + done + + echo "Packages in test_packages that aren't used: ${unused_packages// /}" +fi diff --git a/.github/scripts/verify_changes.sh b/.github/scripts/verify_changes.sh new file mode 100755 index 000000000000..81f3b688eb5b --- /dev/null +++ b/.github/scripts/verify_changes.sh @@ -0,0 +1,33 @@ +#!/bin/bash +# This script checks whether the git diff contains only docs (website) changes + +event_type=$1 # GH event type (pull_request) +ref_name=$2 # branch reference that triggered the workflow +head_ref=$3 # PR branch head ref +base_ref=$4 # PR branch base ref + +changed_dir="" + +if [[ "$event_type" == "pull_request" ]]; then + git fetch --no-tags --prune origin $head_ref + git fetch --no-tags --prune origin $base_ref + head_commit="origin/$head_ref" + base_commit="origin/$base_ref" +else + git fetch --no-tags --prune origin $ref_name + head_commit=$(git log origin/$ref_name --oneline | head -1 | awk '{print $1}') + base_commit=$(git log origin/$ref_name --oneline | head -2 | awk 'NR==2 {print $1}') +fi + +# git diff with ... compares head_commit against the last common ancestor it shares with base_commit +change_count=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq | wc -l) + +if [[ $change_count -eq 1 ]]; then + changed_dir=$(git diff $base_commit...$head_commit --name-only | awk -F"/" '{ print $1}' | uniq) +fi + +if [[ "$changed_dir" == "website" ]]; then + echo "is_docs_change=true" >> "$GITHUB_OUTPUT" +else + echo "is_docs_change=false" >> "$GITHUB_OUTPUT" +fi diff --git a/.github/workflows/actionlint.yml b/.github/workflows/actionlint.yml new file mode 100644 index 000000000000..abe7e7237b81 --- /dev/null +++ b/.github/workflows/actionlint.yml @@ -0,0 +1,15 @@ + +name: Lint GitHub Actions Workflows +on: + push: + pull_request: + paths: + - '.github/**' + +jobs: + actionlint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - name: "Check workflow files" + uses: docker://docker.mirror.hashicorp.services/rhysd/actionlint@sha256:93834930f56ca380be3e9a3377670d7aa5921be251b9c774891a39b3629b83b8 diff --git a/.github/workflows/build-vault-oss.yml b/.github/workflows/build-vault-oss.yml index c7d8dc1e6d7a..0ba21db59999 100644 --- a/.github/workflows/build-vault-oss.yml +++ b/.github/workflows/build-vault-oss.yml @@ -63,7 +63,7 @@ jobs: env: GOARCH: ${{ inputs.goarch }} GOOS: ${{ inputs.goos }} - run: echo "ARTIFACT_BASENAME=$(make ci-get-artifact-basename)" >> $GITHUB_ENV + run: echo "ARTIFACT_BASENAME=$(make ci-get-artifact-basename)" >> "$GITHUB_ENV" - name: Bundle Vault env: BUNDLE_PATH: out/${{ env.ARTIFACT_BASENAME }}.zip @@ -93,8 +93,8 @@ jobs: - if: ${{ inputs.create-packages }} name: Determine package file names run: | - echo "RPM_PACKAGE=$(basename out/*.rpm)" >> $GITHUB_ENV - echo "DEB_PACKAGE=$(basename out/*.deb)" >> $GITHUB_ENV + echo "RPM_PACKAGE=$(basename out/*.rpm)" >> "$GITHUB_ENV" + echo "DEB_PACKAGE=$(basename out/*.deb)" >> "$GITHUB_ENV" - if: ${{ inputs.create-packages }}
uses: actions/upload-artifact@v3 with: diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0561661b0969..406b6debf85b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,8 +10,29 @@ on: - release/** jobs: + # verify-changes determines if the changes are only for docs (website) + verify-changes: + runs-on: ubuntu-latest + outputs: + is_docs_change: ${{ steps.get-changeddir.outputs.is_docs_change }} + steps: + - uses: actions/checkout@v3 + - name: Get changed directories + id: get-changeddir + env: + TYPE: ${{ github.event_name }} + REF_NAME: ${{ github.ref_name }} + HEAD_REF: ${{ github.head_ref }} + BASE: ${{ github.base_ref }} + run: ./.github/scripts/verify_changes.sh ${{ env.TYPE }} ${{ env.REF_NAME }} ${{ env.HEAD_REF }} ${{ env.BASE }} + product-metadata: + # do not run build and test steps for docs changes + # Following https://docs.github.com/en/repositories/configuring-branches-and-merges-in-your-repository/managing-protected-branches/troubleshooting-required-status-checks#handling-skipped-but-required-checks + # we conditionally skip the build and tests for docs(website) changes + if: ${{ needs.verify-changes.outputs.is_docs_change == 'false' }} runs-on: ubuntu-latest + needs: verify-changes outputs: build-date: ${{ steps.get-metadata.outputs.build-date }} filepath: ${{ steps.generate-metadata-file.outputs.filepath }} @@ -31,13 +52,14 @@ jobs: # enos-run-matrices. MATRIX_MAX_TEST_GROUPS: 5 run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "package-name=vault" >> $GITHUB_OUTPUT - echo "vault-base-version=$(make ci-get-version-base)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT + # shellcheck disable=SC2129 + echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT" + echo "go-version=$(cat ./.go-version)" >> "$GITHUB_OUTPUT" + echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> "$GITHUB_OUTPUT" + echo "package-name=vault" >> "$GITHUB_OUTPUT" + echo "vault-base-version=$(make ci-get-version-base)" >> "$GITHUB_OUTPUT" + echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT" + echo "vault-version=$(make ci-get-version)" >> "$GITHUB_OUTPUT" - uses: hashicorp/actions-generate-metadata@v1 id: generate-metadata-file with: @@ -154,13 +176,6 @@ jobs: test: name: Test ${{ matrix.build-artifact-name }} - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" needs: - product-metadata - build-linux @@ -184,13 +199,6 @@ jobs: test-docker-k8s: name: Test Docker K8s - # Only run the Enos workflow against branches that are created from the - # hashicorp/vault repository. This has the effect of limiting execution of - # Enos scenarios to branches that originate from authors that have write - # access to hashicorp/vault repository. 
This is required as Github Actions - # will not populate the required secrets for branches created by outside - # contributors in order to protect the secrets integrity. - if: "! github.event.pull_request.head.repo.fork" needs: - product-metadata - build-docker diff --git a/.github/workflows/changelog-checker.yml b/.github/workflows/changelog-checker.yml index d8a380270b26..3811a767fb18 100644 --- a/.github/workflows/changelog-checker.yml +++ b/.github/workflows/changelog-checker.yml @@ -56,9 +56,9 @@ jobs: # Else, we found some toolchain files. Let's make sure the contents are correct. if ! grep -q 'release-note:change' "$toolchain_files" || ! grep -q '^core: Bump Go version to' "$toolchain_files"; then echo "Invalid format for changelog. Expected format:" - echo "```release-note:change" + echo '```release-note:change' echo "core: Bump Go version to x.y.z." - echo "```" + echo '```' exit 1 else echo "Found Go toolchain changelog entry in PR!" diff --git a/.github/workflows/check-legacy-links-format.yml b/.github/workflows/check-legacy-links-format.yml deleted file mode 100644 index 1330579dc0d4..000000000000 --- a/.github/workflows/check-legacy-links-format.yml +++ /dev/null @@ -1,17 +0,0 @@ -name: Legacy Link Format Checker - -on: - push: - paths: - - "website/content/**/*.mdx" - - "website/data/*-nav-data.json" - -jobs: - check-links: - uses: hashicorp/dev-portal/.github/workflows/docs-content-check-legacy-links-format.yml@475289345d312552b745224b46895f51cc5fc490 - with: - repo-owner: "hashicorp" - repo-name: "vault" - commit-sha: ${{ github.sha }} - mdx-directory: "website/content" - nav-data-directory: "website/data" diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml new file mode 100644 index 000000000000..81d9307ae6ee --- /dev/null +++ b/.github/workflows/ci.yml @@ -0,0 +1,290 @@ +name: CI +on: + pull_request: + push: + branches: + - "main" + workflow_dispatch: + +jobs: + setup: + name: Setup + runs-on: ubuntu-latest + outputs: + compute-tiny: ${{ steps.setup-outputs.outputs.compute-tiny }} + compute-standard: ${{ steps.setup-outputs.outputs.compute-standard }} + compute-larger: ${{ steps.setup-outputs.outputs.compute-larger }} + compute-huge: ${{ steps.setup-outputs.outputs.compute-huge }} + enterprise: ${{ steps.setup-outputs.outputs.enterprise }} + go-build-tags: ${{ steps.setup-outputs.outputs.go-build-tags }} + steps: + - id: setup-outputs + name: Setup outputs + run: | + github_repository="${{ github.repository }}" + + if [ "${github_repository##*/}" == "vault-enterprise" ] ; then + # shellcheck disable=SC2129 + echo 'compute-tiny=["self-hosted","ondemand","linux","type=m5.large"]' >> "$GITHUB_OUTPUT" + echo 'compute-standard=["self-hosted","ondemand","linux","type=m5.xlarge"]' >> "$GITHUB_OUTPUT" + echo 'compute-larger=["self-hosted","ondemand","linux","type=m5.2xlarge"]' >> "$GITHUB_OUTPUT" + echo 'compute-huge=["self-hosted","ondemand","linux","type=m5.4xlarge"]' >> "$GITHUB_OUTPUT" + echo 'enterprise=1' >> "$GITHUB_OUTPUT" + echo 'go-build-tags=ent,enterprise' >> "$GITHUB_OUTPUT" + else + # shellcheck disable=SC2129 + echo 'compute-tiny="ubuntu-latest"' >> "$GITHUB_OUTPUT" # 2 cores, 7 GB RAM, 14 GB SSD + echo 'compute-standard="custom-linux-small-vault-latest"' >> "$GITHUB_OUTPUT" # 8 cores, 32 GB RAM, 300 GB SSD + echo 'compute-larger="custom-linux-medium-vault-latest"' >> "$GITHUB_OUTPUT" # 16 cores, 64 GB RAM, 600 GB SSD + echo 'compute-huge="custom-linux-xl-vault-latest"' >> "$GITHUB_OUTPUT" # 32-cores, 128 GB RAM, 1200 GB SSD + echo 'enterprise=' >> 
"$GITHUB_OUTPUT" + echo 'go-build-tags=' >> "$GITHUB_OUTPUT" + fi + semgrep: + name: Semgrep + needs: + - setup + runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} + container: + image: returntocorp/semgrep@sha256:ffc6f3567654f9431456d49fd059dfe548f007c494a7eb6cd5a1a3e50d813fb3 + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - name: Run Semgrep Rules + id: semgrep + run: semgrep ci --include '*.go' --config 'tools/semgrep/ci' + setup-go-cache: + name: Go Caches + needs: + - setup + uses: ./.github/workflows/setup-go-cache.yml + with: + runs-on: ${{ needs.setup.outputs.compute-standard }} + secrets: inherit + fmt: + name: Check Format + needs: + - setup + runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 + with: + go-version-file: ./.go-version + cache: true + - id: format + run: | + echo "Using gofumpt version $(go run mvdan.cc/gofumpt -version)" + make fmt + if ! git diff --exit-code; then + echo "Code has formatting errors. Run 'make fmt' to fix" + exit 1 + fi + diff-oss-ci: + name: Diff OSS + needs: + - setup + if: ${{ needs.setup.outputs.enterprise != '' && github.base_ref != '' }} + runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + with: + fetch-depth: 0 + - id: determine-branch + run: | + branch="${{ github.base_ref }}" + + if [[ $branch = release/* ]] ; then + branch=${branch%%+ent} + + # Add OSS remote + git config --global user.email "github-team-secret-vault-core@hashicorp.com" + git config --global user.name "hc-github-team-secret-vault-core" + git remote add oss https://github.com/hashicorp/vault.git + git fetch oss "$branch" + + branch="oss/$branch" + else + branch="origin/$branch" + fi + + echo "BRANCH=$branch" >> "$GITHUB_OUTPUT" + - id: diff + run: | + ./.github/scripts/oss-diff.sh ${{ steps.determine-branch.outputs.BRANCH }} HEAD + test-go: + name: Run Go tests + needs: + - setup + - setup-go-cache + # Don't run this job for PR branches starting with 'ui/', 'backport/ui/', 'docs/', or 'backport/docs/' + if: | + !startsWith(github.head_ref, 'ui/') && + !startsWith(github.head_ref, 'backport/ui/') && + !startsWith(github.head_ref, 'docs/') && + !startsWith(github.head_ref, 'backport/docs/') + uses: ./.github/workflows/test-go.yml + with: + total-runners: 16 + go-arch: amd64 + go-build-tags: '${{ needs.setup.outputs.go-build-tags }},deadlock' + runs-on: ${{ needs.setup.outputs.compute-larger }} + enterprise: ${{ needs.setup.outputs.enterprise }} + secrets: inherit + test-go-race: + name: Run Go tests with data race detection + needs: + - setup + - setup-go-cache + # Don't run this job for PR branches starting with 'ui/', 'backport/ui/', 'docs/', or 'backport/docs/' + if: | + !startsWith(github.head_ref, 'ui/') && + !startsWith(github.head_ref, 'backport/ui/') && + !startsWith(github.head_ref, 'docs/') && + !startsWith(github.head_ref, 'backport/docs/') + uses: ./.github/workflows/test-go.yml + with: + total-runners: 16 + env-vars: | + { + "VAULT_CI_GO_TEST_RACE": 1 + } + extra-flags: '-race' + go-arch: amd64 + go-build-tags: ${{ needs.setup.outputs.go-build-tags }} + runs-on: ${{ needs.setup.outputs.compute-huge }} + enterprise: ${{ needs.setup.outputs.enterprise }} + secrets: inherit + test-go-fips: + name: Run Go tests with FIPS configuration + # Only run this job for the enterprise repo if the 
PR branch doesn't start with 'ui/', 'backport/ui/', 'docs/', or 'backport/docs/' + if: | + needs.setup.outputs.enterprise == 1 && + !startsWith(github.head_ref, 'ui/') && + !startsWith(github.head_ref, 'backport/ui/') && + !startsWith(github.head_ref, 'docs/') && + !startsWith(github.head_ref, 'backport/docs/') + needs: + - setup + - setup-go-cache + uses: ./.github/workflows/test-go.yml + with: + total-runners: 16 + env-vars: | + { + "GOEXPERIMENT": "boringcrypto" + } + go-arch: amd64 + go-build-tags: '${{ needs.setup.outputs.go-build-tags }},deadlock,cgo,fips,fips_140_2' + runs-on: ${{ needs.setup.outputs.compute-larger }} + enterprise: ${{ needs.setup.outputs.enterprise }} + secrets: inherit + test-ui: + name: Test UI + # The test-ui job is only run on: + # - pushes to main and branches starting with "release/" + # - PRs where the branch starts with "ui/", "backport/ui/", "merge", or when base branch starts with "release/" + if: | + github.ref_name == 'main' || + startsWith(github.ref_name, 'release/') || + startsWith(github.head_ref, 'ui/') || + startsWith(github.head_ref, 'backport/ui/') || + startsWith(github.head_ref, 'merge') + needs: + - setup + permissions: + id-token: write + contents: read + runs-on: ${{ fromJSON(needs.setup.outputs.compute-larger) }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 + with: + go-version-file: ./.go-version + cache: true + # Setup node.js without caching to allow running npm install -g yarn (next step) + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c + with: + node-version: 14 + - id: install-yarn + run: | + npm install -g yarn + # Setup node.js with caching using the yarn.lock file + - uses: actions/setup-node@64ed1c7eab4cce3362f8c340dee64e5eaeef8f7c + with: + node-version: 14 + cache: yarn + cache-dependency-path: ui/yarn.lock + - id: install-browser-libraries + run: sudo apt install -y libnss3-dev libgdk-pixbuf2.0-dev libgtk-3-dev libxss-dev libasound2 + - id: install-browser + uses: browser-actions/setup-chrome@29abc1a83d1d71557708563b4bc962d0f983a376 + - id: ui-dependencies + name: ui-dependencies + working-directory: ./ui + run: | + yarn install --frozen-lockfile + npm rebuild node-sass + - id: vault-auth + name: Authenticate to Vault + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - id: secrets + name: Fetch secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/hashicorp/vault-enterprise/github-token token | PRIVATE_REPO_GITHUB_TOKEN; + kv/data/github/hashicorp/vault-enterprise/license license_1 | VAULT_LICENSE; + - id: setup-git + name: Setup Git + if: github.repository == 'hashicorp/vault-enterprise' + env: + PRIVATE_REPO_GITHUB_TOKEN: ${{ steps.secrets.outputs.PRIVATE_REPO_GITHUB_TOKEN }} + run: | + git config --global url."https://hc-github-team-secure-vault-core:${PRIVATE_REPO_GITHUB_TOKEN}@github.com".insteadOf https://github.com + - id: build-go-dev + name: build-go-dev + run: | + rm -rf ./pkg + mkdir ./pkg + + make ci-bootstrap dev + - id: test-ui + name: test-ui + env: + VAULT_LICENSE: ${{ steps.secrets.outputs.VAULT_LICENSE }} + run: | + export PATH="${PWD}/bin:${PATH}" + + if [ "${{ github.repository }}" == 
'hashicorp/vault' ] ; then + export VAULT_LICENSE="${{ secrets.VAULT_LICENSE }}" + fi + + # Run Ember tests + cd ui + mkdir -p test-results/qunit + yarn test:oss + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce + with: + name: test-results-ui + path: ui/test-results + if: always() + - uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f + with: + paths: "ui/test-results/qunit/results.xml" + show: "fail" + if: always() + tests-completed: + needs: + - setup + - test-go + - test-ui + if: always() + runs-on: ${{ fromJSON(needs.setup.outputs.compute-tiny) }} + steps: + - run: | + tr -d '\n' <<< '${{ toJSON(needs.*.result) }}' | grep -q -v -E '(failure|cancelled)' diff --git a/.github/workflows/enos-release-testing-oss.yml b/.github/workflows/enos-release-testing-oss.yml index 5fe50e8da061..7cddbc56ed32 100644 --- a/.github/workflows/enos-release-testing-oss.yml +++ b/.github/workflows/enos-release-testing-oss.yml @@ -27,9 +27,19 @@ jobs: # enos-run-matrices. MATRIX_MAX_TEST_GROUPS: 2 run: | - echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> $GITHUB_OUTPUT - echo "vault-revision=$(make ci-get-revision)" >> $GITHUB_OUTPUT - echo "vault-version=$(make ci-get-version)" >> $GITHUB_OUTPUT + # shellcheck disable=SC2129 + echo "matrix-test-group=$(make ci-get-matrix-group-id)" >> "$GITHUB_OUTPUT" + echo "vault-revision=$(make ci-get-revision)" >> "$GITHUB_OUTPUT" + echo "vault-version=$(make ci-get-version)" >> "$GITHUB_OUTPUT" + # Get the workflow summary similar to CRT workflows + - name: Release Artifact Info + run: | + # shellcheck disable=SC2129 + echo "__Product:__ ${{ github.event.client_payload.payload.product }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Version:__ ${{ github.event.client_payload.payload.version }}" >> "$GITHUB_STEP_SUMMARY" + echo "__Commit:__ ${{ github.event.client_payload.payload.sha }}" >> "$GITHUB_STEP_SUMMARY" + echo "" >> "$GITHUB_STEP_SUMMARY" + echo "[Build Workflow](https://github.com/${{github.event.client_payload.payload.org}}/${{github.event.client_payload.payload.repo}}/actions/runs/${{github.event.client_payload.payload.buildworkflowid}})" >> "$GITHUB_STEP_SUMMARY" test: name: Test ${{ matrix.build-artifact-name }} diff --git a/.github/workflows/enos-run-k8s.yml b/.github/workflows/enos-run-k8s.yml index e306966c1abe..e5200d025151 100644 --- a/.github/workflows/enos-run-k8s.yml +++ b/.github/workflows/enos-run-k8s.yml @@ -44,7 +44,7 @@ jobs: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Download Docker Image id: download - uses: actions/download-artifact@v3 + uses: actions/download-artifact@e9ef242655d12993efdcda9058dee2db83a2cb9b with: name: ${{ inputs.artifact-name }} path: ./enos/support/downloads @@ -53,16 +53,16 @@ jobs: IS_ENT: ${{ startsWith(env.ARTIFACT_NAME, 'vault-enterprise' ) }} run: | mkdir -p ./enos/support/terraform-plugin-cache - if ${IS_ENT} == true; then + if [ "$IS_ENT" == true ]; then echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - echo "edition=ent" >> $GITHUB_ENV + echo "edition=ent" >> "$GITHUB_ENV" echo "edition set to 'ent'" - echo "image_repo=hashicorp/vault-enterprise" >> $GITHUB_ENV + echo "image_repo=hashicorp/vault-enterprise" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault-enterprise'" else - echo "edition=oss" >> $GITHUB_ENV + echo "edition=oss" >> "$GITHUB_ENV" echo "edition set to 'oss'" - echo "image_repo=hashicorp/vault" >> $GITHUB_ENV + echo "image_repo=hashicorp/vault" >> "$GITHUB_ENV" echo "image repo set to 'hashicorp/vault'" 
fi - name: Run Enos scenario diff --git a/.github/workflows/godoc-test-checker.yml b/.github/workflows/godoc-test-checker.yml index 048042cf752a..c23f46b3fa5f 100644 --- a/.github/workflows/godoc-test-checker.yml +++ b/.github/workflows/godoc-test-checker.yml @@ -11,17 +11,13 @@ jobs: godoc-test-check: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c with: - ref: ${{ github.event.pull_request.head.sha }} fetch-depth: 0 - - name: get metadata - id: get-metadata - run: echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - name: Set Up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 with: cache: true - go-version: ${{ steps.get-metadata.outputs.go-version }} + go-version-file: ./.go-version - name: Verify new tests have go docs - run: make ci-vet-godoctests \ No newline at end of file + run: make ci-vet-godoctests diff --git a/.github/workflows/goversion-checker.yml b/.github/workflows/goversion-checker.yml deleted file mode 100644 index 71ed31b65e5f..000000000000 --- a/.github/workflows/goversion-checker.yml +++ /dev/null @@ -1,23 +0,0 @@ -name: Check Go version - -on: - pull_request: - types: [opened, synchronize] - -jobs: - go-version-check: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v3 - with: - ref: ${{ github.event.pull_request.head.sha }} - fetch-depth: 0 - - name: Verify go versions in tree are consistent with one another - run: | - GOVER=$(cat .go-version) - EXPECTED="docker.mirror.hashicorp.services/cimg/go:$GOVER" - GOT=$(yq .references.environment.GO_IMAGE .circleci/config/executors/@executors.yml) - if [ "$EXPECTED" != "$GOT" ]; then - echo "version mismatch, .go-version has '$GOVER' and circleci config uses '$GOT'" - exit 1 - fi \ No newline at end of file diff --git a/.github/workflows/oss.yml b/.github/workflows/oss.yml index 4e03b9761ba4..d49550ff5a05 100644 --- a/.github/workflows/oss.yml +++ b/.github/workflows/oss.yml @@ -58,15 +58,15 @@ jobs: - 'ui/**' - name: "Default to core board" - run: echo "PROJECT=170" >> $GITHUB_ENV + run: echo "PROJECT=170" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.cryptosec == 'true' - run: echo "PROJECT=172" >> $GITHUB_ENV + run: echo "PROJECT=172" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ecosystem == 'true' - run: echo "PROJECT=169" >> $GITHUB_ENV + run: echo "PROJECT=169" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.devex == 'true' - run: echo "PROJECT=176" >> $GITHUB_ENV + run: echo "PROJECT=176" >> "$GITHUB_ENV" - if: github.event.pull_request != null && steps.changes.outputs.ui == 'true' - run: echo "PROJECT=171" >> $GITHUB_ENV + run: echo "PROJECT=171" >> "$GITHUB_ENV" - uses: actions/add-to-project@v0.3.0 with: @@ -125,4 +125,4 @@ jobs: # ) { # deletedItemId # } - # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true \ No newline at end of file + # }' -f project_id=$PROJECT_ID -f item_id=$item_id || true diff --git a/.github/workflows/security-scan.yml b/.github/workflows/security-scan.yml index 5d61d8af333a..b00c84cfcfa9 100644 --- a/.github/workflows/security-scan.yml +++ b/.github/workflows/security-scan.yml @@ -10,8 +10,7 @@ on: jobs: scan: - runs-on: - labels: ['linux', 'large'] + runs-on: ['linux', 'large'] if: ${{ github.actor != 'dependabot[bot]' || github.actor != 'hc-github-team-secure-vault-core' }} steps: - uses: actions/checkout@v3 @@ -39,14 +38,14 @@ 
jobs: env: GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - mkdir $HOME/.bin - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep + mkdir "$HOME/.bin" + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-semgrep" go build -o scan-plugin-semgrep . - mv scan-plugin-semgrep $HOME/.bin + mv scan-plugin-semgrep "$HOME/.bin" - cd $GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql + cd "$GITHUB_WORKSPACE/security-scanner/pkg/sdk/examples/scan-plugin-codeql" go build -o scan-plugin-codeql . - mv scan-plugin-codeql $HOME/.bin + mv scan-plugin-codeql "$HOME/.bin" # Semgrep python3 -m pip install semgrep @@ -54,11 +53,11 @@ jobs: # CodeQL LATEST=$(gh release list --repo https://github.com/github/codeql-action | cut -f 3 | sort --version-sort | tail -n1) gh release download --repo https://github.com/github/codeql-action --pattern codeql-bundle-linux64.tar.gz "$LATEST" - tar xf codeql-bundle-linux64.tar.gz -C $HOME/.bin + tar xf codeql-bundle-linux64.tar.gz -C "$HOME/.bin" # Add to PATH - echo "$HOME/.bin" >> $GITHUB_PATH - echo "$HOME/.bin/codeql" >> $GITHUB_PATH + echo "$HOME/.bin" >> "$GITHUB_PATH" + echo "$HOME/.bin/codeql" >> "$GITHUB_PATH" - name: Scan id: scan diff --git a/.github/workflows/setup-go-cache.yml b/.github/workflows/setup-go-cache.yml new file mode 100644 index 000000000000..3b8040a20545 --- /dev/null +++ b/.github/workflows/setup-go-cache.yml @@ -0,0 +1,33 @@ +on: + workflow_call: + inputs: + runs-on: + required: true + type: string +jobs: + setup-go-cache: + runs-on: ${{ fromJSON(inputs.runs-on) }} + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c #v3.3.0 as of 2023-01-18 + - id: setup-go + name: Setup go + uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 #v3.4.0 as of 2022-12-07 + with: + go-version-file: ./.go-version + cache: true + - id: setup-git + name: Setup Git configuration + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN }}@github.com".insteadOf https://github.com + - id: download-modules + name: Download go modules + run: | + # go list ./... forces downloading some additional versions of modules that 'go mod + # download' misses. We need this because we make use of go list itself during + # code generation in later builds that rely on this module cache. + go list ./... + go list -test ./... 
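+ # sdk/ and api/ are separate Go modules (each has its own go.mod), so the subshells below warm their module caches as well without changing this step's working directory.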
+ + go mod download + ( cd sdk && go mod download ) + ( cd api && go mod download ) diff --git a/.github/workflows/test-ci-cleanup.yml b/.github/workflows/test-ci-cleanup.yml index 5035b86760c0..3afdb7229d23 100644 --- a/.github/workflows/test-ci-cleanup.yml +++ b/.github/workflows/test-ci-cleanup.yml @@ -22,7 +22,7 @@ jobs: - name: Get all regions id: setup run: | - echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> $GITHUB_OUTPUT + echo "regions=$(aws ec2 describe-regions --region us-east-1 --output json --query 'Regions[].RegionName' | tr -d '\n ')" >> "$GITHUB_OUTPUT" aws-nuke: needs: setup diff --git a/.github/workflows/test-enos-scenario-ui.yml b/.github/workflows/test-enos-scenario-ui.yml index b7d882cee137..9fa25bd0f3b2 100644 --- a/.github/workflows/test-enos-scenario-ui.yml +++ b/.github/workflows/test-enos-scenario-ui.yml @@ -42,16 +42,16 @@ jobs: env: IS_ENT: ${{ startsWith(github.event.repository.name, 'vault-enterprise' ) }} run: | - echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT - echo "node-version=$(cat ./ui/.nvmrc)" >> $GITHUB_OUTPUT - if ${IS_ENT} == true; then + echo "go-version=$(cat ./.go-version)" >> "$GITHUB_OUTPUT" + echo "node-version=$(cat ./ui/.nvmrc)" >> "$GITHUB_OUTPUT" + if [ "$IS_ENT" == true ]; then echo "detected vault_edition=ent" - echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> $GITHUB_OUTPUT - echo "vault_edition=ent" >> $GITHUB_OUTPUT + echo "runs-on=['self-hosted', 'ondemand', 'os=linux', 'type=m5d.4xlarge']" >> "$GITHUB_OUTPUT" + echo "vault_edition=ent" >> "$GITHUB_OUTPUT" else echo "detected vault_edition=oss" - echo "runs-on=\"custom-linux-xl-vault-latest\"" >> $GITHUB_OUTPUT - echo "vault_edition=oss" >> $GITHUB_OUTPUT + echo "runs-on=\"custom-linux-xl-vault-latest\"" >> "$GITHUB_OUTPUT" + echo "vault_edition=oss" >> "$GITHUB_OUTPUT" fi run-ui-tests: @@ -100,7 +100,7 @@ jobs: run: echo "${{ secrets.VAULT_LICENSE }}" > ./enos/support/vault.hclic || true - name: Check Chrome Installed id: chrome-check - run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> $GITHUB_OUTPUT + run: echo "chrome-version=$(chrome --version 2> /dev/null || google-chrome --version 2> /dev/null || google-chrome-stable --version 2> /dev/null || echo 'not-installed')" >> "$GITHUB_OUTPUT" - name: Install Chrome Dependencies if: steps.chrome-check.outputs.chrome-version == 'not-installed' run: | diff --git a/.github/workflows/test-go.yml b/.github/workflows/test-go.yml new file mode 100644 index 000000000000..bc7178c213c9 --- /dev/null +++ b/.github/workflows/test-go.yml @@ -0,0 +1,199 @@ +on: + workflow_call: + inputs: + go-arch: + description: The execution architecture (arm, amd64, etc.) + required: true + type: string + enterprise: + description: A flag indicating whether this workflow is executing for the enterprise repository. + required: true + type: string + total-runners: + description: Number of runners to use for executing the tests. + required: true + type: string + env-vars: + description: A map of environment variables as JSON. + required: false + type: string + default: '{}' + extra-flags: + description: A space-separated list of additional build flags. + required: false + type: string + runs-on: + description: An expression indicating which kind of runners to use.
+ required: false + type: string + default: ubuntu-latest + go-build-tags: + description: A comma-separated list of additional build tags to consider satisfied during the build. + required: false + type: string + +env: ${{ fromJSON(inputs.env-vars) }} + +jobs: + test-generate-test-package-list: + runs-on: ${{ fromJSON(inputs.runs-on) }} + name: Verify Test Package Distribution + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - id: test + working-directory: .github/scripts + run: | + ENTERPRISE=${{ inputs.enterprise }} ./test-generate-test-package-lists.sh + runner-indexes: + runs-on: ${{ fromJSON(inputs.runs-on) }} + name: Generate runner indexes + # + # This job generates a JSON Array of integers ranging from 1 to 16. + # That array is used in the matrix section of the test-go job below. + # + outputs: + runner-indexes: ${{ steps.generate-index-list.outputs.indexes }} + steps: + - id: generate-index-list + run: | + INDEX_LIST="$(seq 1 ${{ inputs.total-runners }})" + INDEX_JSON="$(jq --null-input --compact-output '. |= [inputs]' <<< "${INDEX_LIST}")" + echo "indexes=${INDEX_JSON}" >> "${GITHUB_OUTPUT}" + test-go: + permissions: + id-token: write # Note: this permission is explicitly required for Vault auth + contents: read + name: "${{ matrix.runner-index }}" + needs: + - runner-indexes + runs-on: ${{ fromJSON(inputs.runs-on) }} + strategy: + fail-fast: false + matrix: + # + # Initialize the runner-index key with the JSON array of integers + # generated above. + # + runner-index: ${{ fromJSON(needs.runner-indexes.outputs.runner-indexes) }} + env: + GOPRIVATE: github.com/hashicorp/* + TIMEOUT_IN_MINUTES: 60 + steps: + - uses: actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c + - uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 + with: + go-version-file: ./.go-version + cache: true + - name: Authenticate to Vault + id: vault-auth + if: github.repository == 'hashicorp/vault-enterprise' + run: vault-auth + - name: Fetch Secrets + id: secrets + if: github.repository == 'hashicorp/vault-enterprise' + uses: hashicorp/vault-action@130d1f5f4fe645bb6c83e4225c04d64cfb62de6e + with: + url: ${{ steps.vault-auth.outputs.addr }} + caCertificate: ${{ steps.vault-auth.outputs.ca_certificate }} + token: ${{ steps.vault-auth.outputs.token }} + secrets: | + kv/data/github/${{ github.repository }}/datadog-ci DATADOG_API_KEY; + kv/data/github/${{ github.repository }}/github-token username-and-token | github-token; + kv/data/github/${{ github.repository }}/license license_1 | VAULT_LICENSE_CI; + kv/data/github/${{ github.repository }}/license license_2 | VAULT_LICENSE_2; + kv/data/github/${{ github.repository }}/hcp-link HCP_API_ADDRESS; + kv/data/github/${{ github.repository }}/hcp-link HCP_AUTH_URL; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_ID; + kv/data/github/${{ github.repository }}/hcp-link HCP_CLIENT_SECRET; + kv/data/github/${{ github.repository }}/hcp-link HCP_RESOURCE_ID; + - id: setup-git-private + name: Setup Git configuration (private) + if: github.repository == 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ steps.secrets.outputs.github-token }}@github.com".insteadOf https://github.com + - id: setup-git-public + name: Setup Git configuration (public) + if: github.repository != 'hashicorp/vault-enterprise' + run: | + git config --global url."https://${{ secrets.ELEVATED_GITHUB_TOKEN}}@github.com".insteadOf https://github.com + - id: run-go-tests + name: Run Go tests + timeout-minutes: 
${{ fromJSON(env.TIMEOUT_IN_MINUTES) }} + run: | + set -exo pipefail + + # + # This script creates a Bash array with 16 elements, each + # containing a space-delimited list of package names. The + # array element corresponding to this instance's + # matrix.runner-index value selects the packages tested below. + # + ENTERPRISE=${{ inputs.enterprise }} source .github/scripts/generate-test-package-lists.sh + + # Build the dynamically generated source files. + make prep + + mkdir -p test-results/go-test + + # We don't want VAULT_LICENSE set when running Go tests, because that's + # not what developers have in their environments and it could break some + # tests; it would be like setting VAULT_TOKEN. However, some non-Go + # CI commands, like the UI tests, shouldn't have to worry about licensing. + # So we provide the tests that want an externally supplied license with one + # via the VAULT_LICENSE_CI and VAULT_LICENSE_2 environment variables, and we unset VAULT_LICENSE here. + # shellcheck disable=SC2034 + VAULT_LICENSE= + + # Assign test licenses to relevant variables if they aren't already + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export VAULT_LICENSE_CI=${{ secrets.ci_license }} + export VAULT_LICENSE_2=${{ secrets.ci_license_2 }} + export HCP_API_ADDRESS=${{ secrets.HCP_API_ADDRESS }} + export HCP_AUTH_URL=${{ secrets.HCP_AUTH_URL }} + export HCP_CLIENT_ID=${{ secrets.HCP_CLIENT_ID }} + export HCP_CLIENT_SECRET=${{ secrets.HCP_CLIENT_SECRET }} + export HCP_RESOURCE_ID=${{ secrets.HCP_RESOURCE_ID }} + # Temporarily removing this variable to cause HCP Link tests + # to be skipped. + #export HCP_SCADA_ADDRESS=${{ secrets.HCP_SCADA_ADDRESS }} + fi + + # shellcheck disable=SC2086 # can't quote package list + GOARCH=${{ inputs.go-arch }} \ + go run gotest.tools/gotestsum --format=short-verbose \ + --junitfile test-results/go-test/results.xml \ + --jsonfile test-results/go-test/results.json \ + -- \ + -tags "${{ inputs.go-build-tags }}" \ + -timeout=${{ env.TIMEOUT_IN_MINUTES }}m \ + -parallel=20 \ + ${{ inputs.extra-flags }} \ + \ + ${test_packages[${{ matrix.runner-index }}]} + - name: Prepare datadog-ci + if: github.repository == 'hashicorp/vault' + continue-on-error: true + run: | + curl -L --fail "https://github.com/DataDog/datadog-ci/releases/latest/download/datadog-ci_linux-x64" --output "/usr/local/bin/datadog-ci" + chmod +x /usr/local/bin/datadog-ci + - name: Upload test results to DataDog + continue-on-error: true + env: + DD_ENV: ci + run: | + if [[ ${{ github.repository }} == 'hashicorp/vault' ]]; then + export DATADOG_API_KEY=${{ secrets.DATADOG_API_KEY }} + fi + datadog-ci junit upload --service "$GITHUB_REPOSITORY" test-results/go-test/results.xml + - name: Archive test results + uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce + with: + name: test-results-${{ matrix.runner-index }} + path: test-results/ + if: always() + - name: Create a summary of tests + uses: test-summary/action@62bc5c68de2a6a0d02039763b8c754569df99e3f + with: + paths: "test-results/go-test/results.xml" + show: "fail" + if: always() diff --git a/.github/workflows/test-run-acc-tests-for-path.yml b/.github/workflows/test-run-acc-tests-for-path.yml index c53fb1aa9537..f82b32c567a1 100644 --- a/.github/workflows/test-run-acc-tests-for-path.yml +++ b/.github/workflows/test-run-acc-tests-for-path.yml @@ -20,15 +20,13 @@ jobs: go-test: runs-on: ubuntu-latest steps: - - uses: actions/checkout@v3 - - id: get-metadata - run: echo "go-version=$(cat ./.go-version)" >> $GITHUB_OUTPUT + - uses:
actions/checkout@ac593985615ec2ede58e132d2e21d2b1cbd6127c - name: Set Up Go - uses: actions/setup-go@v3 + uses: actions/setup-go@d0a58c1c4d2b25278816e339b944508c875f3613 with: - go-version: ${{ steps.get-metadata.outputs.go-version }} + go-version-file: ./.go-version - run: go test -v ./${{ inputs.path }}/... 2>&1 | tee ${{ inputs.name }}.txt - - uses: actions/upload-artifact@v3 + - uses: actions/upload-artifact@0b7f8abb1508181956e8e162db84b466c27e18ce with: name: ${{ inputs.name }}-output path: ${{ inputs.name }}.txt diff --git a/.github/workflows/test-run-enos-scenario-matrix.yml b/.github/workflows/test-run-enos-scenario-matrix.yml index a687f222af43..b40b6b50b3f2 100644 --- a/.github/workflows/test-run-enos-scenario-matrix.yml +++ b/.github/workflows/test-run-enos-scenario-matrix.yml @@ -77,10 +77,10 @@ jobs: ref: ${{ inputs.vault-revision }} - id: metadata run: | - echo "build-date=$(make ci-get-date)" >> $GITHUB_OUTPUT - echo "version=$(make ci-get-version)" >> $GITHUB_OUTPUT - filtered=$(make ci-filter-matrix) - echo "matrix=$(echo $filtered)}" >> $GITHUB_OUTPUT + # shellcheck disable=SC2129 + echo "build-date=$(make ci-get-date)" >> "$GITHUB_OUTPUT" + echo "version=$(make ci-get-version)" >> "$GITHUB_OUTPUT" + echo "matrix=$(make ci-filter-matrix)" >> "$GITHUB_OUTPUT" # Run the Enos test scenarios run: @@ -104,6 +104,7 @@ jobs: ENOS_VAR_vault_revision: ${{ inputs.vault-revision }} ENOS_VAR_vault_bundle_path: ./support/downloads/${{ inputs.build-artifact-name }} ENOS_VAR_vault_license_path: ./support/vault.hclic + ENOS_DEBUG_DATA_ROOT_DIR: /tmp/enos-debug-data steps: - uses: actions/checkout@v3 - uses: hashicorp/setup-terraform@v2 @@ -123,10 +124,12 @@ jobs: with: github-token: ${{ secrets.ELEVATED_GITHUB_TOKEN }} - name: Prepare scenario dependencies + id: prepare_scenario run: | - mkdir -p ./enos/support/terraform-plugin-cache - echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > ./enos/support/private_key.pem - chmod 600 ./enos/support/private_key.pem + mkdir -p "./enos/support/terraform-plugin-cache" + echo "${{ secrets.SSH_KEY_PRIVATE_CI }}" > "./enos/support/private_key.pem" + chmod 600 "./enos/support/private_key.pem" + echo "debug_data_artifact_name=enos-debug-data_$(echo "${{ matrix.scenario }}" | sed -e 's/ /_/g' | sed -e 's/:/=/g')" >> "$GITHUB_OUTPUT" - if: contains(inputs.matrix-file-name, 'github') uses: actions/download-artifact@v3 with: @@ -145,6 +148,14 @@ jobs: id: run_retry if: steps.run.outcome == 'failure' run: enos scenario run --timeout 60m0s --chdir ./enos ${{ matrix.scenario }} + - name: Upload Debug Data + if: failure() + uses: actions/upload-artifact@v3 + with: + # The name of the artifact is the same as the matrix scenario name, with spaces replaced by underscores and colons replaced by equals signs.
+ name: ${{ steps.prepare_scenario.outputs.debug_data_artifact_name }} + path: ${{ env.ENOS_DEBUG_DATA_ROOT_DIR }} + retention-days: 30 - name: Ensure scenario has been destroyed if: ${{ always() }} # With Enos version 0.0.11 the destroy step returns an error if the infrastructure diff --git a/.gitignore b/.gitignore index cdd542d60689..6138dcc5dd2a 100644 --- a/.gitignore +++ b/.gitignore @@ -67,6 +67,10 @@ enos/.terraform/* enos/.terraform.lock.hcl enos/*.tfstate enos/*.tfstate.* +enos/**/.terraform/* +enos/**/.terraform.lock.hcl +enos/**/*.tfstate +enos/**/*.tfstate.* .DS_Store .idea @@ -127,4 +131,4 @@ website/components/node_modules .releaser/ *.log -tools/godoctests/.bin \ No newline at end of file +tools/godoctests/.bin diff --git a/.go-version b/.go-version index 5fb5a6b4f547..f5b00dc262be 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.20 +1.20.3 diff --git a/.hooks/pre-commit b/.hooks/pre-commit index 17309e55a9d7..f40519e53516 100755 --- a/.hooks/pre-commit +++ b/.hooks/pre-commit @@ -35,9 +35,7 @@ block() { # Add all check functions to this space separated list. # They are executed in this order (see end of file). -CHECKS="ui_lint circleci_verify" - -MIN_CIRCLECI_VERSION=0.1.5575 +CHECKS="ui_lint" # Run ui linter if changes in that dir detected. ui_lint() { @@ -62,82 +60,6 @@ ui_lint() { $LINTER || block "UI lint failed" } -# Check .circleci/config.yml is up to date and valid, and that all changes are -# included together in this commit. -circleci_verify() { - # Change to the root dir of the repo. - cd "$(git rev-parse --show-toplevel)" - - # Fail early if we accidentally used '.yaml' instead of '.yml' - if ! git diff --name-only --cached --exit-code -- '.circleci/***.yaml'; then - # This is just for consistency, as I keep making this mistake - Sam. - block "ERROR: File(s) with .yaml extension detected. Please rename them .yml instead." - fi - - # Succeed early if no changes to yml files in .circleci/ are currently staged. - # make ci-verify is slow so we really don't want to run it unnecessarily. - if git diff --name-only --cached --exit-code -- '.circleci/***.yml'; then - return 0 - fi - # Make sure to add no explicit output before this line, as it would just be noise - # for those making non-circleci changes. - echo "==> Verifying config changes in .circleci/" - echo "--> OK: All files are .yml not .yaml" - - # Ensure commit includes _all_ files in .circleci/ - # So not only are the files up to date, but we are also committing them in one go. - if ! git diff --name-only --exit-code -- '.circleci/***.yml'; then - echo "ERROR: Some .yml diffs in .circleci/ are staged, others not." - block "Please commit the entire .circleci/ directory together, or omit it altogether." - fi - - echo "--> OK: All .yml files in .circleci are staged." - - if ! REASON=$(check_circleci_cli_version); then - echo "*** WARNING: Unable to verify changes in .circleci/:" - echo "--> $REASON" - # We let this pass if there is no valid circleci version installed. - return 0 - fi - - if ! make -C .circleci ci-verify; then - block "ERROR: make ci-verify failed" - fi - - echo "--> OK: make ci-verify succeeded." -} - -check_circleci_cli_version() { - if ! command -v circleci > /dev/null 2>&1; then - echo "circleci cli not installed." - return 1 - fi - - CCI="circleci --skip-update-check" - - if ! THIS_VERSION=$($CCI version) > /dev/null 2>&1; then - # Guards against very old versions that do not have --skip-update-check. - echo "The installed circleci cli is too old. 
Please upgrade to at least $MIN_CIRCLECI_VERSION." - return 1 - fi - - # SORTED_MIN is the lower of the THIS_VERSION and MIN_CIRCLECI_VERSION. - if ! SORTED_MIN="$(printf "%s\n%s" "$MIN_CIRCLECI_VERSION" "$THIS_VERSION" | sort -V | head -n1)"; then - echo "Failed to sort versions. Please open an issue to report this." - return 1 - fi - - if [ "$THIS_VERSION" != "${THIS_VERSION#$MIN_CIRCLECI_VERSION}" ]; then - return 0 # OK - Versions have the same prefix, so we consider them equal. - elif [ "$SORTED_MIN" = "$MIN_CIRCLECI_VERSION" ]; then - return 0 # OK - MIN_CIRCLECI_VERSION is lower than THIS_VERSION. - fi - - # Version too low. - echo "The installed circleci cli v$THIS_VERSION is too old. Please upgrade to at least $MIN_CIRCLECI_VERSION" - return 1 -} - for CHECK in $CHECKS; do # Force each check into a subshell to avoid crosstalk. ( $CHECK ) || exit $? diff --git a/Makefile b/Makefile index ac68d55f0714..fdab28e7cf72 100644 --- a/Makefile +++ b/Makefile @@ -30,13 +30,16 @@ default: dev bin: prep @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' sh -c "'$(CURDIR)/scripts/build.sh'" +testonly: + $(eval BUILD_TAGS += testonly) + # dev creates binaries for testing Vault locally. These are put # into ./bin/ as well as $GOPATH/bin -dev: prep +dev: prep testonly @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" -dev-ui: assetcheck prep +dev-ui: assetcheck prep testonly @CGO_ENABLED=$(CGO_ENABLED) BUILD_TAGS='$(BUILD_TAGS) ui' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" -dev-dynamic: prep +dev-dynamic: prep testonly @CGO_ENABLED=1 BUILD_TAGS='$(BUILD_TAGS)' VAULT_DEV_BUILD=1 sh -c "'$(CURDIR)/scripts/build.sh'" # *-mem variants will enable memory profiling which will write snapshots of heap usage @@ -51,14 +54,14 @@ dev-dynamic-mem: dev-dynamic # Creates a Docker image by adding the compiled linux/amd64 binary found in ./bin. # The resulting image is tagged "vault:dev". -docker-dev: prep +docker-dev: prep testonly docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile -t vault:dev . -docker-dev-ui: prep +docker-dev-ui: prep testonly docker build --build-arg VERSION=$(GO_VERSION_MIN) --build-arg BUILD_TAGS="$(BUILD_TAGS)" -f scripts/docker/Dockerfile.ui -t vault:dev-ui . # test runs the unit tests and vets the code -test: prep +test: prep testonly @CGO_ENABLED=$(CGO_ENABLED) \ VAULT_ADDR= \ VAULT_TOKEN= \ @@ -66,13 +69,13 @@ test: prep VAULT_ACC= \ $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) $(TESTARGS) -timeout=$(TEST_TIMEOUT) -parallel=20 -testcompile: prep +testcompile: prep testonly @for pkg in $(TEST) ; do \ $(GO_CMD) test -v -c -tags='$(BUILD_TAGS)' $$pkg -parallel=4 ; \ done # testacc runs acceptance tests -testacc: prep +testacc: prep testonly @if [ "$(TEST)" = "./..." ]; then \ echo "ERROR: Set TEST to a specific package"; \ exit 1; \ @@ -80,7 +83,7 @@ testacc: prep VAULT_ACC=1 $(GO_CMD) test -tags='$(BUILD_TAGS)' $(TEST) -v $(TESTARGS) -timeout=$(EXTENDED_TEST_TIMEOUT) # testrace runs the race checker -testrace: prep +testrace: prep testonly @CGO_ENABLED=1 \ VAULT_ADDR= \ VAULT_TOKEN= \ @@ -189,6 +192,7 @@ proto: bootstrap @sh -c "'$(CURDIR)/scripts/protocversioncheck.sh' '$(PROTOC_VERSION_MIN)'" protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/*.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. 
--go-grpc_opt=paths=source_relative vault/activity/activity_log.proto + protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative vault/activity/generation/generate_data.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/storagepacker/types.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative helper/forwarding/types.proto protoc --go_out=. --go_opt=paths=source_relative --go-grpc_out=. --go-grpc_opt=paths=source_relative sdk/logical/*.proto @@ -217,7 +221,7 @@ fmtcheck: #@sh -c "'$(CURDIR)/scripts/gofmtcheck.sh'" fmt: - find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs gofumpt -w + find . -name '*.go' | grep -v pb.go | grep -v vendor | xargs go run mvdan.cc/gofumpt -w semgrep: semgrep --include '*.go' --exclude 'vendor' -a -f tools/semgrep . @@ -257,13 +261,6 @@ hana-database-plugin: mongodb-database-plugin: @CGO_ENABLED=0 $(GO_CMD) build -o bin/mongodb-database-plugin ./plugins/database/mongodb/mongodb-database-plugin -.PHONY: ci-config -ci-config: - @$(MAKE) -C .circleci ci-config -.PHONY: ci-verify -ci-verify: - @$(MAKE) -C .circleci ci-verify - .PHONY: bin default prep test vet bootstrap ci-bootstrap fmt fmtcheck mysql-database-plugin mysql-legacy-database-plugin cassandra-database-plugin influxdb-database-plugin postgresql-database-plugin mssql-database-plugin hana-database-plugin mongodb-database-plugin ember-dist ember-dist-dev static-dist static-dist-dev assetcheck check-vault-in-path packages build build-ci semgrep semgrep-ci vet-godoctests ci-vet-godoctests .NOTPARALLEL: ember-dist ember-dist-dev diff --git a/api/go.mod b/api/go.mod index 3aa9e832615c..62288bd371f0 100644 --- a/api/go.mod +++ b/api/go.mod @@ -17,7 +17,7 @@ require ( github.com/hashicorp/go-secure-stdlib/strutil v0.1.2 github.com/hashicorp/hcl v1.0.0 github.com/mitchellh/mapstructure v1.5.0 - golang.org/x/net v0.5.0 + golang.org/x/net v0.7.0 golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 gopkg.in/square/go-jose.v2 v2.5.1 ) @@ -30,7 +30,7 @@ require ( github.com/mattn/go-isatty v0.0.12 // indirect github.com/mitchellh/go-homedir v1.1.0 // indirect github.com/ryanuber/go-glob v1.0.0 // indirect - golang.org/x/crypto v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/crypto v0.6.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/text v0.7.0 // indirect ) diff --git a/api/go.sum b/api/go.sum index b89be959556b..808c56f7fe08 100644 --- a/api/go.sum +++ b/api/go.sum @@ -62,19 +62,19 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.7.0 h1:nwc3DEeHmmLAfoZucVR881uASk0Mfjw8xYJ99tb5CcY= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/net v0.7.0 
h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1 h1:NusfzzA6yGQ+ua51ck7E3omNUX/JuqbFSaRGqU8CcLI= golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= diff --git a/builtin/credential/approle/path_role.go b/builtin/credential/approle/path_role.go index 35e3bfbbb686..7759c07037bb 100644 --- a/builtin/credential/approle/path_role.go +++ b/builtin/credential/approle/path_role.go @@ -1964,12 +1964,21 @@ func (b *backend) pathRoleSecretIDAccessorDestroyUpdateDelete(ctx context.Contex return nil, fmt.Errorf("failed to create HMAC of role_name: %w", err) } - entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) - lock := b.secretIDLock(accessorEntry.SecretIDHMAC) lock.Lock() defer lock.Unlock() + // Verify we have a valid SecretID Storage Entry + entry, err := b.nonLockedSecretIDStorageEntry(ctx, req.Storage, role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + if err != nil { + return nil, err + } + if entry == nil { + return logical.ErrorResponse("invalid secret id accessor"), logical.ErrPermissionDenied + } + + entryIndex := fmt.Sprintf("%s%s/%s", role.SecretIDPrefix, roleNameHMAC, accessorEntry.SecretIDHMAC) + // Delete the accessor of the SecretID first if err := b.deleteSecretIDAccessorEntry(ctx, req.Storage, secretIDAccessor, role.SecretIDPrefix); err != nil { return nil, err diff --git a/builtin/credential/approle/path_role_test.go b/builtin/credential/approle/path_role_test.go index 3b201eefb4aa..d6ceed858154 100644 --- a/builtin/credential/approle/path_role_test.go +++ b/builtin/credential/approle/path_role_test.go @@ -2696,3 +2696,59 @@ func TestAppRole_SecretID_WithTTL(t *testing.T) { }) } } + +// TestAppRole_RoleSecretIDAccessorCrossDelete tests deleting a secret id via +// secret id accessor belonging to a different role +func TestAppRole_RoleSecretIDAccessorCrossDelete(t *testing.T) { + var resp *logical.Response + var err error + b, storage := createBackendWithStorage(t) + + // Create First Role + createRole(t, b, storage, "role1", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: 
logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id", + }) + + // Create Second Role + createRole(t, b, storage, "role2", "a,b") + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Get role2 secretID Accessor + resp = b.requestNoErr(t, &logical.Request{ + Operation: logical.ListOperation, + Storage: storage, + Path: "role/role2/secret-id", + }) + + // Read back role2 secretID Accessor information + hmacSecretID := resp.Data["keys"].([]string)[0] + _ = b.requestNoErr(t, &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role2/secret-id-accessor/lookup", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + // Attempt to destroy role2 secretID accessor using role1 path + _, err = b.HandleRequest(context.Background(), &logical.Request{ + Operation: logical.UpdateOperation, + Storage: storage, + Path: "role/role1/secret-id-accessor/destroy", + Data: map[string]interface{}{ + "secret_id_accessor": hmacSecretID, + }, + }) + + if err == nil { + t.Fatalf("expected error") + } +} diff --git a/builtin/credential/cert/backend_test.go b/builtin/credential/cert/backend_test.go index ed4b250ce5c7..56d44ea4a2d0 100644 --- a/builtin/credential/cert/backend_test.go +++ b/builtin/credential/cert/backend_test.go @@ -1965,6 +1965,27 @@ func testAccStepCertWithExtraParams(t *testing.T, name string, cert []byte, poli } } +func testAccStepReadCertPolicy(t *testing.T, name string, expectError bool, expected map[string]interface{}) logicaltest.TestStep { + return logicaltest.TestStep{ + Operation: logical.ReadOperation, + Path: "certs/" + name, + ErrorOk: expectError, + Data: nil, + Check: func(resp *logical.Response) error { + if (resp == nil || len(resp.Data) == 0) && expectError { + return fmt.Errorf("expected error but received nil") + } + for key, expectedValue := range expected { + actualValue := resp.Data[key] + if expectedValue != actualValue { + return fmt.Errorf("Expected to get [%v]=[%v] but read [%v]=[%v] from server for certs/%v: %v", key, expectedValue, key, actualValue, name, resp) + } + } + return nil + }, + } +} + func testAccStepCertLease( t *testing.T, name string, cert []byte, policies string, ) logicaltest.TestStep { diff --git a/builtin/credential/cert/path_certs.go b/builtin/credential/cert/path_certs.go index 13f6da78c495..a0f84a9fe18d 100644 --- a/builtin/credential/cert/path_certs.go +++ b/builtin/credential/cert/path_certs.go @@ -278,6 +278,11 @@ func (b *backend) pathCertRead(ctx context.Context, req *logical.Request, d *fra "allowed_organizational_units": cert.AllowedOrganizationalUnits, "required_extensions": cert.RequiredExtensions, "allowed_metadata_extensions": cert.AllowedMetadataExtensions, + "ocsp_ca_certificates": cert.OcspCaCertificates, + "ocsp_enabled": cert.OcspEnabled, + "ocsp_servers_override": cert.OcspServersOverride, + "ocsp_fail_open": cert.OcspFailOpen, + "ocsp_query_all_servers": cert.OcspQueryAllServers, } cert.PopulateTokenData(data) diff --git a/builtin/credential/cert/path_login_test.go b/builtin/credential/cert/path_login_test.go index f69444270f39..a2d6f2e292a2 100644 --- a/builtin/credential/cert/path_login_test.go +++ b/builtin/credential/cert/path_login_test.go @@ -345,6 +345,7 @@ func TestCert_RoleResolveOCSP(t *testing.T) { Steps: []logicaltest.TestStep{ testAccStepCertWithExtraParams(t, "web", ca, "foo", allowed{dns: "example.com"}, false, 
map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), + testAccStepReadCertPolicy(t, "web", false, map[string]interface{}{"ocsp_enabled": true, "ocsp_fail_open": c.failOpen}), loginStep, resolveStep, }, diff --git a/builtin/credential/github/path_config.go b/builtin/credential/github/path_config.go index 84c03d3dbb79..707115c567e3 100644 --- a/builtin/credential/github/path_config.go +++ b/builtin/credential/github/path_config.go @@ -4,6 +4,7 @@ import ( "context" "fmt" "net/url" + "os" "strings" "time" @@ -94,7 +95,8 @@ func (b *backend) pathConfigWrite(ctx context.Context, req *logical.Request, dat } if c.OrganizationID == 0 { - client, err := b.Client("") + githubToken := os.Getenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN") + client, err := b.Client(githubToken) if err != nil { return nil, err } diff --git a/builtin/credential/github/path_config_test.go b/builtin/credential/github/path_config_test.go index e8d0cf5fdb39..d59599f32620 100644 --- a/builtin/credential/github/path_config_test.go +++ b/builtin/credential/github/path_config_test.go @@ -6,6 +6,7 @@ import ( "fmt" "net/http" "net/http/httptest" + "os" "strings" "testing" @@ -120,6 +121,43 @@ func TestGitHub_WriteReadConfig_OrgID(t *testing.T) { assert.Equal(t, "foo-org", resp.Data["organization"]) } +// TestGitHub_WriteReadConfig_Token tests that we can successfully read and +// write the github auth config with a token environment variable +func TestGitHub_WriteReadConfig_Token(t *testing.T) { + b, s := createBackendWithStorage(t) + // use a test server to return our mock GH org info + ts := setupTestServer(t) + defer ts.Close() + + err := os.Setenv("VAULT_AUTH_CONFIG_GITHUB_TOKEN", "foobar") + assert.NoError(t, err) + + resp, err := b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.UpdateOperation, + Data: map[string]interface{}{ + "organization": "foo-org", + "base_url": ts.URL, // base_url will call the test server + }, + Storage: s, + }) + assert.NoError(t, err) + assert.Nil(t, resp) + assert.NoError(t, resp.Error()) + + // Read the config + resp, err = b.HandleRequest(context.Background(), &logical.Request{ + Path: "config", + Operation: logical.ReadOperation, + Storage: s, + }) + assert.NoError(t, err) + assert.NoError(t, resp.Error()) + + // the token should not be returned in the read config response. 
+ assert.Nil(t, resp.Data["token"]) +} + // TestGitHub_ErrorNoOrgID tests that an error is returned when we cannot fetch // the org ID for the given org name func TestGitHub_ErrorNoOrgID(t *testing.T) { diff --git a/builtin/credential/ldap/backend_test.go b/builtin/credential/ldap/backend_test.go index 74b4e18a17e3..74dfdf99ed07 100644 --- a/builtin/credential/ldap/backend_test.go +++ b/builtin/credential/ldap/backend_test.go @@ -829,6 +829,7 @@ func testAccStepConfigUrl(t *testing.T, cfg *ldaputil.ConfigEntry) logicaltest.T "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, "username_as_alias": cfg.UsernameAsAlias, }, } @@ -851,6 +852,7 @@ func testAccStepConfigUrlWithAuthBind(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -871,6 +873,7 @@ func testAccStepConfigUrlWithDiscover(t *testing.T, cfg *ldaputil.ConfigEntry) l "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -888,6 +891,7 @@ func testAccStepConfigUrlNoGroupDN(t *testing.T, cfg *ldaputil.ConfigEntry) logi "discoverdn": true, "case_sensitive_names": true, "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, } } @@ -908,6 +912,7 @@ func testAccStepConfigUrlWarningCheck(t *testing.T, cfg *ldaputil.ConfigEntry, o "case_sensitive_names": true, "token_policies": "abc,xyz", "request_timeout": cfg.RequestTimeout, + "connection_timeout": cfg.ConnectionTimeout, }, Check: func(response *logical.Response) error { if len(response.Warnings) == 0 { @@ -1189,6 +1194,8 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { "token_period": "5m", "token_explicit_max_ttl": "24h", "request_timeout": cfg.RequestTimeout, + "max_page_size": cfg.MaximumPageSize, + "connection_timeout": cfg.ConnectionTimeout, }, Storage: storage, Connection: &logical.Connection{}, @@ -1230,7 +1237,9 @@ func TestLdapAuthBackend_ConfigUpgrade(t *testing.T) { CaseSensitiveNames: falseBool, UsePre111GroupCNBehavior: new(bool), RequestTimeout: cfg.RequestTimeout, + ConnectionTimeout: cfg.ConnectionTimeout, UsernameAsAlias: false, + MaximumPageSize: 1000, }, } diff --git a/builtin/logical/aws/secret_access_keys.go b/builtin/logical/aws/secret_access_keys.go index eb83ed5fa16e..c3818d5d9727 100644 --- a/builtin/logical/aws/secret_access_keys.go +++ b/builtin/logical/aws/secret_access_keys.go @@ -155,15 +155,28 @@ func (b *backend) getFederationToken(ctx context.Context, s logical.Storage, return logical.ErrorResponse("Error generating STS keys: %s", err), awsutil.CheckAWSError(err) } - // STS credentials cannot be revoked so do not create a lease - return &logical.Response{ - Data: map[string]interface{}{ - "access_key": *tokenResp.Credentials.AccessKeyId, - "secret_key": *tokenResp.Credentials.SecretAccessKey, - "security_token": *tokenResp.Credentials.SessionToken, - "ttl": uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()), - }, - }, nil + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. 
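+ // The secret TTL set below mirrors the STS token expiry, so the lease_duration callers see counts down the token's remaining lifetime.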
+ // + ttl := tokenResp.Credentials.Expiration.Sub(time.Now()) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": username, + "policy": policy, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil } func (b *backend) assumeRole(ctx context.Context, s logical.Storage, @@ -230,16 +243,29 @@ func (b *backend) assumeRole(ctx context.Context, s logical.Storage, return logical.ErrorResponse("Error assuming role: %s", err), awsutil.CheckAWSError(err) } - // STS credentials cannot be revoked so do not create a lease - return &logical.Response{ - Data: map[string]interface{}{ - "access_key": *tokenResp.Credentials.AccessKeyId, - "secret_key": *tokenResp.Credentials.SecretAccessKey, - "security_token": *tokenResp.Credentials.SessionToken, - "arn": *tokenResp.AssumedRoleUser.Arn, - "ttl": uint64(tokenResp.Credentials.Expiration.Sub(time.Now()).Seconds()), - }, - }, nil + // While STS credentials cannot be revoked/renewed, we will still create a lease since users are + // relying on a non-zero `lease_duration` in order to manage their lease lifecycles manually. + // + ttl := tokenResp.Credentials.Expiration.Sub(time.Now()) + resp := b.Secret(secretAccessKeyType).Response(map[string]interface{}{ + "access_key": *tokenResp.Credentials.AccessKeyId, + "secret_key": *tokenResp.Credentials.SecretAccessKey, + "security_token": *tokenResp.Credentials.SessionToken, + "arn": *tokenResp.AssumedRoleUser.Arn, + "ttl": uint64(ttl.Seconds()), + }, map[string]interface{}{ + "username": roleSessionName, + "policy": roleArn, + "is_sts": true, + }) + + // Set the secret TTL to appropriately match the expiration of the token + resp.Secret.TTL = ttl + + // STS are purposefully short-lived and aren't renewable + resp.Secret.Renewable = false + + return resp, nil } func readConfig(ctx context.Context, storage logical.Storage) (rootConfig, error) { diff --git a/builtin/logical/pki/backend.go b/builtin/logical/pki/backend.go index e9f509025463..d54c2bfbcb5a 100644 --- a/builtin/logical/pki/backend.go +++ b/builtin/logical/pki/backend.go @@ -92,6 +92,12 @@ func Backend(conf *logical.BackendConfig) *backend { "issuer/+/crl/delta/der", "issuer/+/crl/delta/pem", "issuer/+/crl/delta", + "issuer/+/unified-crl/der", + "issuer/+/unified-crl/pem", + "issuer/+/unified-crl", + "issuer/+/unified-crl/delta/der", + "issuer/+/unified-crl/delta/pem", + "issuer/+/unified-crl/delta", "issuer/+/pem", "issuer/+/der", "issuer/+/json", @@ -162,6 +168,7 @@ func Backend(conf *logical.BackendConfig) *backend { // Issuer APIs pathListIssuers(&b), pathGetIssuer(&b), + pathGetUnauthedIssuer(&b), pathGetIssuerCRL(&b), pathImportIssuer(&b), pathIssuerIssue(&b), diff --git a/builtin/logical/pki/backend_test.go b/builtin/logical/pki/backend_test.go index 51f986b5329c..74402bf27c79 100644 --- a/builtin/logical/pki/backend_test.go +++ b/builtin/logical/pki/backend_test.go @@ -31,6 +31,10 @@ import ( "testing" "time" + "github.com/hashicorp/vault/helper/testhelpers/teststorage" + + "github.com/hashicorp/vault/helper/testhelpers" + "github.com/stretchr/testify/require" "github.com/armon/go-metrics" @@ -4935,12 
+4939,13 @@ func TestIssuanceTTLs(t *testing.T) { }) require.Error(t, err, "expected issuance to fail due to longer default ttl than cert ttl") - resp, err = CBWrite(b, s, "issuer/root", map[string]interface{}{ - "issuer_name": "root", + resp, err = CBPatch(b, s, "issuer/root", map[string]interface{}{ "leaf_not_after_behavior": "permit", }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "permit") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ -4953,6 +4958,8 @@ }) require.NoError(t, err) require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.Equal(t, resp.Data["leaf_not_after_behavior"], "truncate") _, err = CBWrite(b, s, "issue/local-testing", map[string]interface{}{ "common_name": "testing", @@ -6346,6 +6353,482 @@ func TestUserIDsInLeafCerts(t *testing.T) { requireSubjectUserIDAttr(t, resp.Data["certificate"].(string), "humanoid") } +// TestStandby_Operations tests proper forwarding for PKI requests from a standby node to the +// active node within a cluster. +func TestStandby_Operations(t *testing.T) { + conf, opts := teststorage.ClusterSetup(&vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + }, nil, teststorage.InmemBackendSetup) + cluster := vault.NewTestCluster(t, conf, opts) + cluster.Start() + defer cluster.Cleanup() + + testhelpers.WaitForActiveNodeAndStandbys(t, cluster) + standbyCores := testhelpers.DeriveStandbyCores(t, cluster) + require.Greater(t, len(standbyCores), 0, "Need at least one standby core.") + client := standbyCores[0].Client + + mountPKIEndpoint(t, client, "pki") + + _, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "root-ca.com", + "ttl": "600h", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + _, err = client.Logical().Write("pki/roles/example", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "no_store": "false", // make sure we store this cert + "ttl": "5h", + "key_type": "ec", + }) + require.NoError(t, err, "error setting up pki role: %v", err) + + resp, err := client.Logical().Write("pki/issue/example", map[string]interface{}{ + "common_name": "test.example.com", + }) + require.NoError(t, err, "error issuing certificate: %v", err) + require.NotNil(t, resp, "got nil response from issuing request") + serialOfCert := resp.Data["serial_number"].(string) + + resp, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialOfCert, + }) + require.NoError(t, err, "error revoking certificate: %v", err) + require.NotNil(t, resp, "got nil response from revoke request") +} + +type pathAuthCheckerFunc func(t *testing.T, client *api.Client, path string, token string) + +func isPermDenied(err error) bool { + return err != nil && strings.Contains(err.Error(), "permission denied") +} + +func isUnsupportedPathOperation(err error) bool { + return err != nil && (strings.Contains(err.Error(), "unsupported path") || strings.Contains(err.Error(), "unsupported operation")) +} + +func isDeniedOp(err error) bool { + return isPermDenied(err) || isUnsupportedPathOperation(err) +} + +func pathShouldBeAuthed(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err == nil || !isPermDenied(err) {
t.Fatalf("expected failure to read %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to list %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to write %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to delete %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isPermDenied(err) { + t.Fatalf("expected failure to patch %v while unauthed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedReadList(t *testing.T, client *api.Client, path string, token string) { + // Should be able to read both with and without a token. + client.SetToken("") + resp, err := client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // Read will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ReadWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to read %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + // List will sometimes return permission denied, when the handler + // does not support the given operation. Retry with the token. + client.SetToken(token) + resp2, err2 := client.Logical().ListWithContext(ctx, path) + if err2 != nil && !isUnsupportedPathOperation(err2) { + t.Fatalf("unexpected failure to list %v while unauthed: %v / %v\nWhile authed: %v / %v", path, err, resp, err2, resp2) + } + client.SetToken("") + } + + // These should all be denied. + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while unauthed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow read/list, but not modification still. + client.SetToken(token) + resp, err = client.Logical().ReadWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to read %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to list %v while authed: %v / %v", path, err, resp) + } + + // Should all be denied. 
+ resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + if !strings.Contains(path, "ocsp") || !strings.Contains(err.Error(), "Code: 40") { + t.Fatalf("unexpected failure during write on read-only path %v while authed: %v / %v", path, err, resp) + } + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during delete on read-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if err == nil || !isDeniedOp(err) { + t.Fatalf("unexpected failure during patch on read-only path %v while authed: %v / %v", path, err, resp) + } +} + +func pathShouldBeUnauthedWriteOnly(t *testing.T, client *api.Client, path string, token string) { + client.SetToken("") + resp, err := client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while unauthed: %v / %v", path, err, resp) + } + + // These should all be denied. However, on OSS, we might end up with + // a regular 404, which looks like err == resp == nil; hence we only + // fail when there's a non-nil response and/or a non-nil err. + resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during list on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while unauthed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while unauthed: %v / %v", path, err, resp) + } + + // Retrying with token should allow writing, but nothing else. + client.SetToken(token) + resp, err = client.Logical().WriteWithContext(ctx, path, map[string]interface{}{}) + if err != nil && isPermDenied(err) { + t.Fatalf("unexpected failure to write %v while authed: %v / %v", path, err, resp) + } + + // These should all be denied.
+ resp, err = client.Logical().ReadWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during read on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().ListWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during list on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().DeleteWithContext(ctx, path) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during delete on write-only path %v while authed: %v / %v", path, err, resp) + } + resp, err = client.Logical().JSONMergePatch(ctx, path, map[string]interface{}{}) + if (err == nil && resp != nil) || (err != nil && !isDeniedOp(err)) { + t.Fatalf("unexpected failure during patch on write-only path %v while authed: %v / %v", path, err, resp) + } +} + +type pathAuthChecker int + +const ( + shouldBeAuthed pathAuthChecker = iota + shouldBeUnauthedReadList + shouldBeUnauthedWriteOnly +) + +var pathAuthCheckerMap = map[pathAuthChecker]pathAuthCheckerFunc{ + shouldBeAuthed: pathShouldBeAuthed, + shouldBeUnauthedReadList: pathShouldBeUnauthedReadList, + shouldBeUnauthedWriteOnly: pathShouldBeUnauthedWriteOnly, +} + +func TestProperAuthing(t *testing.T) { + t.Parallel() + ctx := context.Background() + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + client := cluster.Cores[0].Client + token := client.Token() + + // Mount PKI. + err := client.Sys().MountWithContext(ctx, "pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "60h", + }, + }) + if err != nil { + t.Fatal(err) + } + + // Set up basic configuration.
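+ // Generate a root, create a role, and issue a leaf certificate so that + // the serial-specific paths (cert/<serial>) below have something to + // resolve against.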
+ _, err = client.Logical().WriteWithContext(ctx, "pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "myvault.com", + }) + if err != nil { + t.Fatal(err) + } + + _, err = client.Logical().WriteWithContext(ctx, "pki/roles/test", map[string]interface{}{ + "allow_localhost": true, + }) + if err != nil { + t.Fatal(err) + } + + resp, err := client.Logical().WriteWithContext(ctx, "pki/issue/test", map[string]interface{}{ + "common_name": "localhost", + }) + if err != nil || resp == nil { + t.Fatal(err) + } + serial := resp.Data["serial_number"].(string) + + paths := map[string]pathAuthChecker{ + "ca_chain": shouldBeUnauthedReadList, + "cert/ca_chain": shouldBeUnauthedReadList, + "ca": shouldBeUnauthedReadList, + "ca/pem": shouldBeUnauthedReadList, + "cert/" + serial: shouldBeUnauthedReadList, + "cert/" + serial + "/raw": shouldBeUnauthedReadList, + "cert/" + serial + "/raw/pem": shouldBeUnauthedReadList, + "cert/crl": shouldBeUnauthedReadList, + "cert/crl/raw": shouldBeUnauthedReadList, + "cert/crl/raw/pem": shouldBeUnauthedReadList, + "cert/delta-crl": shouldBeUnauthedReadList, + "cert/delta-crl/raw": shouldBeUnauthedReadList, + "cert/delta-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-crl": shouldBeUnauthedReadList, + "cert/unified-crl/raw": shouldBeUnauthedReadList, + "cert/unified-crl/raw/pem": shouldBeUnauthedReadList, + "cert/unified-delta-crl": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw": shouldBeUnauthedReadList, + "cert/unified-delta-crl/raw/pem": shouldBeUnauthedReadList, + "certs": shouldBeAuthed, + "certs/revoked": shouldBeAuthed, + "certs/revocation-queue": shouldBeAuthed, + "certs/unified-revoked": shouldBeAuthed, + "config/auto-tidy": shouldBeAuthed, + "config/ca": shouldBeAuthed, + "config/cluster": shouldBeAuthed, + "config/crl": shouldBeAuthed, + "config/issuers": shouldBeAuthed, + "config/keys": shouldBeAuthed, + "config/urls": shouldBeAuthed, + "crl": shouldBeUnauthedReadList, + "crl/pem": shouldBeUnauthedReadList, + "crl/delta": shouldBeUnauthedReadList, + "crl/delta/pem": shouldBeUnauthedReadList, + "crl/rotate": shouldBeAuthed, + "crl/rotate-delta": shouldBeAuthed, + "intermediate/cross-sign": shouldBeAuthed, + "intermediate/generate/exported": shouldBeAuthed, + "intermediate/generate/internal": shouldBeAuthed, + "intermediate/generate/existing": shouldBeAuthed, + "intermediate/generate/kms": shouldBeAuthed, + "intermediate/set-signed": shouldBeAuthed, + "issue/test": shouldBeAuthed, + "issuer/default": shouldBeAuthed, + "issuer/default/der": shouldBeUnauthedReadList, + "issuer/default/json": shouldBeUnauthedReadList, + "issuer/default/pem": shouldBeUnauthedReadList, + "issuer/default/crl": shouldBeUnauthedReadList, + "issuer/default/crl/pem": shouldBeUnauthedReadList, + "issuer/default/crl/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta": shouldBeUnauthedReadList, + "issuer/default/crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl": shouldBeUnauthedReadList, + "issuer/default/unified-crl/pem": shouldBeUnauthedReadList, + "issuer/default/unified-crl/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/der": shouldBeUnauthedReadList, + "issuer/default/unified-crl/delta/pem": shouldBeUnauthedReadList, + "issuer/default/issue/test": shouldBeAuthed, + "issuer/default/resign-crls": shouldBeAuthed, + "issuer/default/revoke": shouldBeAuthed, + 
"issuer/default/sign-intermediate": shouldBeAuthed, + "issuer/default/sign-revocation-list": shouldBeAuthed, + "issuer/default/sign-self-issued": shouldBeAuthed, + "issuer/default/sign-verbatim": shouldBeAuthed, + "issuer/default/sign-verbatim/test": shouldBeAuthed, + "issuer/default/sign/test": shouldBeAuthed, + "issuers": shouldBeUnauthedReadList, + "issuers/generate/intermediate/exported": shouldBeAuthed, + "issuers/generate/intermediate/internal": shouldBeAuthed, + "issuers/generate/intermediate/existing": shouldBeAuthed, + "issuers/generate/intermediate/kms": shouldBeAuthed, + "issuers/generate/root/exported": shouldBeAuthed, + "issuers/generate/root/internal": shouldBeAuthed, + "issuers/generate/root/existing": shouldBeAuthed, + "issuers/generate/root/kms": shouldBeAuthed, + "issuers/import/cert": shouldBeAuthed, + "issuers/import/bundle": shouldBeAuthed, + "key/default": shouldBeAuthed, + "keys": shouldBeAuthed, + "keys/generate/internal": shouldBeAuthed, + "keys/generate/exported": shouldBeAuthed, + "keys/generate/kms": shouldBeAuthed, + "keys/import": shouldBeAuthed, + "ocsp": shouldBeUnauthedWriteOnly, + "ocsp/dGVzdAo=": shouldBeUnauthedReadList, + "revoke": shouldBeAuthed, + "revoke-with-key": shouldBeAuthed, + "roles/test": shouldBeAuthed, + "roles": shouldBeAuthed, + "root": shouldBeAuthed, + "root/generate/exported": shouldBeAuthed, + "root/generate/internal": shouldBeAuthed, + "root/generate/existing": shouldBeAuthed, + "root/generate/kms": shouldBeAuthed, + "root/replace": shouldBeAuthed, + "root/rotate/internal": shouldBeAuthed, + "root/rotate/exported": shouldBeAuthed, + "root/rotate/existing": shouldBeAuthed, + "root/rotate/kms": shouldBeAuthed, + "root/sign-intermediate": shouldBeAuthed, + "root/sign-self-issued": shouldBeAuthed, + "sign-verbatim": shouldBeAuthed, + "sign-verbatim/test": shouldBeAuthed, + "sign/test": shouldBeAuthed, + "tidy": shouldBeAuthed, + "tidy-cancel": shouldBeAuthed, + "tidy-status": shouldBeAuthed, + "unified-crl": shouldBeUnauthedReadList, + "unified-crl/pem": shouldBeUnauthedReadList, + "unified-crl/delta": shouldBeUnauthedReadList, + "unified-crl/delta/pem": shouldBeUnauthedReadList, + "unified-ocsp": shouldBeUnauthedWriteOnly, + "unified-ocsp/dGVzdAo=": shouldBeUnauthedReadList, + } + for path, checkerType := range paths { + checker := pathAuthChckerMap[checkerType] + checker(t, client, "pki/"+path, token) + } + + client.SetToken(token) + openAPIResp, err := client.Logical().ReadWithContext(ctx, "sys/internal/specs/openapi") + if err != nil { + t.Fatalf("failed to get openapi data: %v", err) + } + + validatedPath := false + for openapi_path, raw_data := range openAPIResp.Data["paths"].(map[string]interface{}) { + if !strings.HasPrefix(openapi_path, "/pki/") { + t.Logf("Skipping path: %v", openapi_path) + continue + } + + t.Logf("Validating path: %v", openapi_path) + validatedPath = true + // Substitute values in from our testing map. 
+ raw_path := openapi_path[5:] + if strings.Contains(raw_path, "roles/") && strings.Contains(raw_path, "{name}") { + raw_path = strings.ReplaceAll(raw_path, "{name}", "test") + } + if strings.Contains(raw_path, "{role}") { + raw_path = strings.ReplaceAll(raw_path, "{role}", "test") + } + if strings.Contains(raw_path, "ocsp/") && strings.Contains(raw_path, "{req}") { + raw_path = strings.ReplaceAll(raw_path, "{req}", "dGVzdAo=") + } + if strings.Contains(raw_path, "{issuer_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{issuer_ref}", "default") + } + if strings.Contains(raw_path, "{key_ref}") { + raw_path = strings.ReplaceAll(raw_path, "{key_ref}", "default") + } + if strings.Contains(raw_path, "{exported}") { + raw_path = strings.ReplaceAll(raw_path, "{exported}", "internal") + } + if strings.Contains(raw_path, "{serial}") { + raw_path = strings.ReplaceAll(raw_path, "{serial}", serial) + } + + handler, present := paths[raw_path] + if !present { + t.Fatalf("OpenAPI reports PKI mount contains %v->%v but was not tested to be authed or unauthed.", openapi_path, raw_path) + } + + openapi_data := raw_data.(map[string]interface{}) + hasList := false + rawGetData, hasGet := openapi_data["get"] + if hasGet { + getData := rawGetData.(map[string]interface{}) + getParams, paramsPresent := getData["parameters"].(map[string]interface{}) + if getParams != nil && paramsPresent { + if _, hasList = getParams["list"]; hasList { + // LIST is usually exclusive of GET on the same endpoint. + hasGet = false + } + } + } + _, hasPost := openapi_data["post"] + _, hasDelete := openapi_data["delete"] + + if handler == shouldBeUnauthedReadList { + if hasPost || hasDelete { + t.Fatalf("Unauthed read-only endpoints should not have POST/DELETE capabilities: %v->%v", openapi_path, raw_path) + } + } else if handler == shouldBeUnauthedWriteOnly { + if hasGet || hasList { + t.Fatalf("Unauthed write-only endpoints should not have GET/LIST capabilities: %v->%v", openapi_path, raw_path) + } + } + } + + if !validatedPath { + t.Fatalf("Expected to have validated at least one path.") + } +} + var ( initTest sync.Once rsaCAKey string diff --git a/builtin/logical/pki/crl_util.go b/builtin/logical/pki/crl_util.go index 7cd975f75f1c..c159dc429f50 100644 --- a/builtin/logical/pki/crl_util.go +++ b/builtin/logical/pki/crl_util.go @@ -153,7 +153,6 @@ func (cb *crlBuilder) reloadConfigIfRequired(sc *storageContext) error { } previousConfig := cb.config - // Set the default config if none was returned to us. if config != nil { cb.config = *config @@ -351,7 +350,7 @@ func (cb *crlBuilder) _getPresentDeltaWALForClearing(sc *storageContext, path st // Clearing of the delta WAL occurs after a new complete CRL has been built.
walSerials, err := sc.Storage.List(sc.Context, path) if err != nil { - return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %s", err) + return nil, fmt.Errorf("error fetching list of delta WAL certificates to clear: %w", err) } // We _should_ remove the special WAL entries here, but we don't really @@ -365,7 +364,28 @@ func (cb *crlBuilder) getPresentLocalDeltaWALForClearing(sc *storageContext) ([] } func (cb *crlBuilder) getPresentUnifiedDeltaWALForClearing(sc *storageContext) ([]string, error) { - return cb._getPresentDeltaWALForClearing(sc, unifiedDeltaWALPath) + walClusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) + if err != nil { + return nil, fmt.Errorf("error fetching list of clusters with delta WAL entries: %w", err) + } + + var allPaths []string + for index, cluster := range walClusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterPaths, err := cb._getPresentDeltaWALForClearing(sc, prefix) + if err != nil { + return nil, fmt.Errorf("error fetching delta WAL entries for cluster (%v / %v): %w", index, cluster, err) + } + + // Here, we don't want to include the unifiedDeltaWALPrefix because + // clearUnifiedDeltaWAL handles that for us. Instead, just include + // the cluster identifier. + for _, clusterPath := range clusterPaths { + allPaths = append(allPaths, cluster+clusterPath) + } + } + + return allPaths, nil } func (cb *crlBuilder) _clearDeltaWAL(sc *storageContext, walSerials []string, path string) error { @@ -377,7 +397,7 @@ } if err := sc.Storage.Delete(sc.Context, path+serial); err != nil { - return fmt.Errorf("error clearing delta WAL certificate: %s", err) + return fmt.Errorf("error clearing delta WAL certificate: %w", err) } } @@ -512,49 +532,70 @@ func (cb *crlBuilder) _shouldRebuildUnifiedCRLs(sc *storageContext, override boo return false, nil } - // Fetch two storage entries to see if we actually need to do this - // rebuild, given we're within the window. - lastWALEntry, err := sc.Storage.Get(sc.Context, unifiedDeltaWALLastRevokedSerial) - if err != nil || !override && (lastWALEntry == nil || lastWALEntry.Value == nil) { - // If this entry does not exist, we don't need to rebuild the - // delta WAL due to the expiration assumption above. There must - // not have been any new revocations. Since err should be nil - // in this case, we can safely return it. - return false, err + // If we're overriding whether we should build Delta CRLs, always return + // true, even if storage errors might've happened. + if override { + return true, nil } - lastBuildEntry, err := sc.Storage.Get(sc.Context, unifiedDeltaWALLastBuildSerial) + // Fetch two storage entries to see if we actually need to do this + // rebuild, given we're within the window. We need to fetch these + // two entries per cluster. + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) if err != nil { - return false, err + return false, fmt.Errorf("failed to get the list of clusters having written Delta WALs: %w", err) } - if !override && lastBuildEntry != nil && lastBuildEntry.Value != nil { - // If the last build entry doesn't exist, we still want to build a - // new delta WAL, since this could be our very first time doing so. - // + // If any cluster tells us to rebuild, we should rebuild.
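+ // For each cluster, compare the last-revoked WAL serial against the + // serial recorded at the last delta CRL build; a mismatch or a missing + // build marker means new revocations exist since the last build.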
+ shouldRebuild := false + for index, cluster := range clusters { + prefix := unifiedDeltaWALPrefix + cluster + clusterUnifiedLastRevokedWALEntry := prefix + deltaWALLastRevokedSerialName + clusterUnifiedLastBuiltWALEntry := prefix + deltaWALLastBuildSerialName + + lastWALEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastRevokedWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastWALEntry == nil || lastWALEntry.Value == nil { + continue + } + + lastBuildEntry, err := sc.Storage.Get(sc.Context, clusterUnifiedLastBuiltWALEntry) + if err != nil { + return false, fmt.Errorf("failed fetching last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) + } + + if lastBuildEntry == nil || lastBuildEntry.Value == nil { + // If the last build entry doesn't exist, we still want to build a + // new delta WAL, since this could be our very first time doing so. + shouldRebuild = true + break + } + // Otherwise, here, now that we know it exists, we want to check this // value against the other value. Since we previously guarded the WAL // entry being non-empty, we're good to decode everything within this // guard. var walInfo lastWALInfo if err := lastWALEntry.DecodeJSON(&walInfo); err != nil { - return false, err + return false, fmt.Errorf("failed decoding last revoked WAL entry for cluster (%v / %v): %w", index, cluster, err) } var deltaInfo lastDeltaInfo if err := lastBuildEntry.DecodeJSON(&deltaInfo); err != nil { - return false, err + return false, fmt.Errorf("failed decoding last built CRL WAL entry for cluster (%v / %v): %w", index, cluster, err) } - // Here, everything decoded properly and we know that no new certs - // have been revoked since we built this last delta CRL. We can exit - // without rebuilding then. - if walInfo.Serial == deltaInfo.Serial { - return false, nil + if walInfo.Serial != deltaInfo.Serial { + shouldRebuild = true + break } } - return true, nil + // No errors occurred, so return the result. + return shouldRebuild, nil } func (cb *crlBuilder) rebuildDeltaCRLs(sc *storageContext, forceNew bool) error { @@ -655,7 +696,7 @@ func (cb *crlBuilder) processRevocationQueue(sc *storageContext) error { (!sc.Backend.System().LocalMount() && sc.Backend.System().ReplicationState().HasState(consts.ReplicationPerformanceSecondary)) if err := cb.maybeGatherQueueForFirstProcess(sc, isNotPerfPrimary); err != nil { - return fmt.Errorf("failed to gather first queue: %v", err) + return fmt.Errorf("failed to gather first queue: %w", err) } revQueue := cb.revQueue.Iterate() @@ -970,18 +1011,30 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( revEntry, err := logical.StorageEntryJSON(revokedPath+hyphenSerial, revInfo) if err != nil { - return nil, fmt.Errorf("error creating revocation entry") + return nil, fmt.Errorf("error creating revocation entry: %w", err) } certsCounted := sc.Backend.certsCounted.Load() err = sc.Storage.Put(sc.Context, revEntry) if err != nil { - return nil, fmt.Errorf("error saving revoked certificate to new location") + return nil, fmt.Errorf("error saving revoked certificate to new location: %w", err) } sc.Backend.incrementTotalRevokedCertificatesCount(certsCounted, revEntry.Key) + // From here on out, the certificate has been revoked locally. Any other + // persistence issues might still err, but any other failure messages + // should be added as warnings to the revocation. 
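+ // Build the success response up front so the best-effort steps below + // can attach warnings to it instead of failing the revocation.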
+ resp := &logical.Response{ + Data: map[string]interface{}{ + "revocation_time": revInfo.RevocationTime, + "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), + "state": "revoked", + }, + } + // If this flag is enabled after the fact, existing local entries will be published to // the unified storage space through a periodic function. + failedWritingUnifiedCRL := false if config.UnifiedCRL { entry := &unifiedRevocationEntry{ SerialNumber: colonSerial, @@ -994,9 +1047,12 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( if ignoreErr != nil { // Just log the error if we fail to write across clusters, a separate background // thread will reattempt it later on as we have the local write done. - sc.Backend.Logger().Debug("Failed to write unified revocation entry, will re-attempt later", + sc.Backend.Logger().Error("Failed to write unified revocation entry, will re-attempt later", "serial_number", colonSerial, "error", ignoreErr) sc.Backend.unifiedTransferStatus.forceRun() + + resp.AddWarning(fmt.Sprintf("Failed to write unified revocation entry, will re-attempt later: %v", err)) + failedWritingUnifiedCRL = true } } @@ -1015,26 +1071,20 @@ func revokeCert(sc *storageContext, config *crlConfig, cert *x509.Certificate) ( } } } else if config.EnableDelta { - if err := writeRevocationDeltaWALs(sc, config, hyphenSerial, colonSerial); err != nil { + if err := writeRevocationDeltaWALs(sc, config, resp, failedWritingUnifiedCRL, hyphenSerial, colonSerial); err != nil { return nil, fmt.Errorf("failed to write WAL entries for Delta CRLs: %w", err) } } - return &logical.Response{ - Data: map[string]interface{}{ - "revocation_time": revInfo.RevocationTime, - "revocation_time_rfc3339": revInfo.RevocationTimeUTC.Format(time.RFC3339Nano), - "state": "revoked", - }, - }, nil + return resp, nil } -func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, hyphenSerial string, colonSerial string) error { +func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, resp *logical.Response, failedWritingUnifiedCRL bool, hyphenSerial string, colonSerial string) error { if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, localDeltaWALPath); err != nil { return fmt.Errorf("failed to write local delta WAL entry: %w", err) } - if config.UnifiedCRL { + if config.UnifiedCRL && !failedWritingUnifiedCRL { // We only need to write cross-cluster unified Delta WAL entries when // it is enabled; in particular, because we rebuild CRLs when enabling // this flag, any revocations that happened prior to enabling unified @@ -1044,9 +1094,21 @@ func writeRevocationDeltaWALs(sc *storageContext, config *crlConfig, hyphenSeria // listing for the unified CRL rebuild, this revocation will not // appear on either the main or the next delta CRL, but will need to // wait for a subsequent complete CRL rebuild). - if err := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); err != nil { - return fmt.Errorf("failed to write cross-cluster delta WAL entry: %w", err) + // + // Lastly, we don't attempt this if the unified CRL entry failed to + // write, as we need that entry before the delta WAL entry will make + // sense. + if ignoredErr := writeSpecificRevocationDeltaWALs(sc, hyphenSerial, colonSerial, unifiedDeltaWALPath); ignoredErr != nil { + // Just log the error if we fail to write across clusters, a separate background + // thread will reattempt it later on as we have the local write done. 
+ sc.Backend.Logger().Error("Failed to write cross-cluster delta WAL entry, will re-attempt later", + "serial_number", colonSerial, "error", ignoredErr) + sc.Backend.unifiedTransferStatus.forceRun() + + resp.AddWarning(fmt.Sprintf("Failed to write cross-cluster delta WAL entry, will re-attempt later: %v", ignoredErr)) } + } else if failedWritingUnifiedCRL { + resp.AddWarning("Skipping cross-cluster delta WAL entry as cross-cluster revocation failed to write; will re-attempt later.") } return nil @@ -1082,7 +1144,7 @@ func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, c } if err = sc.Storage.Put(sc.Context, walEntry); err != nil { - return fmt.Errorf("error saving delta CRL WAL entry") + return fmt.Errorf("error saving delta CRL WAL entry: %w", err) } // In order for periodic delta rebuild to be mildly efficient, we @@ -1094,7 +1156,7 @@ func writeSpecificRevocationDeltaWALs(sc *storageContext, hyphenSerial string, c return fmt.Errorf("unable to create last delta CRL WAL entry") } if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { - return fmt.Errorf("error saving last delta CRL WAL entry") + return fmt.Errorf("error saving last delta CRL WAL entry: %w", err) } return nil @@ -1269,7 +1331,7 @@ func buildAnyCRLs(sc *storageContext, forceNew bool, isDelta bool) error { } func getLastWALSerial(sc *storageContext, path string) (string, error) { - lastWALEntry, err := sc.Storage.Get(sc.Context, localDeltaWALLastRevokedSerial) + lastWALEntry, err := sc.Storage.Get(sc.Context, path) if err != nil { return "", err } @@ -1432,11 +1494,23 @@ func buildAnyUnifiedCRLs( // (and potentially more) in it; when we're done writing the delta CRL, // we'll write this serial as a sentinel to see if we need to rebuild it // in the future. - var lastDeltaSerial string + // + // We need to do this per-cluster. + lastDeltaSerial := map[string]string{} if isDelta { - lastDeltaSerial, err = getLastWALSerial(sc, unifiedDeltaWALLastRevokedSerial) + clusters, err := sc.Storage.List(sc.Context, unifiedDeltaWALPrefix) if err != nil { - return nil, err + return nil, fmt.Errorf("error listing clusters for unified delta WAL building: %w", err) + } + + for index, cluster := range clusters { + path := unifiedDeltaWALPrefix + cluster + deltaWALLastRevokedSerialName + serial, err := getLastWALSerial(sc, path) + if err != nil { + return nil, fmt.Errorf("error getting last written Delta WAL serial for cluster (%v / %v): %w", index, cluster, err) + } + + lastDeltaSerial[cluster] = serial } } @@ -1507,12 +1581,20 @@ func buildAnyUnifiedCRLs( // for a while. sc.Backend.crlBuilder.lastDeltaRebuildCheck = time.Now() - if len(lastDeltaSerial) > 0 { - // When we have a last delta serial, write out the relevant info - // so we can skip extra CRL rebuilds. - deltaInfo := lastDeltaInfo{Serial: lastDeltaSerial} + // Persist all of our known last revoked serial numbers here, as the + // last seen serial during build. This will allow us to detect if any + // new revocations have occurred, forcing us to rebuild the delta CRL. + for cluster, serial := range lastDeltaSerial { + if len(serial) == 0 { + continue + } - lastDeltaBuildEntry, err := logical.StorageEntryJSON(unifiedDeltaWALLastBuildSerial, deltaInfo) + // Make sure to use the cluster-specific path. Since we're on the + // active node of the primary cluster, we own this entry and can + // safely write it. 
+ path := unifiedDeltaWALPrefix + cluster + deltaWALLastBuildSerialName + deltaInfo := lastDeltaInfo{Serial: serial} + lastDeltaBuildEntry, err := logical.StorageEntryJSON(path, deltaInfo) if err != nil { return nil, fmt.Errorf("error creating last delta CRL rebuild serial entry: %w", err) } @@ -1841,12 +1923,12 @@ func getLocalRevokedCertEntries(sc *storageContext, issuerIDCertMap map[issuerID // we should update the entry to make future CRL builds faster. revokedEntry, err = logical.StorageEntryJSON(revokedPath+serial, revInfo) if err != nil { - return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v", serial) + return nil, nil, fmt.Errorf("error creating revocation entry for existing cert: %v: %w", serial, err) } err = sc.Storage.Put(sc.Context, revokedEntry) if err != nil { - return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v", serial) + return nil, nil, fmt.Errorf("error updating revoked certificate at existing location: %v: %w", serial, err) } } } diff --git a/builtin/logical/pki/path_fetch_issuers.go b/builtin/logical/pki/path_fetch_issuers.go index 591bd0e5ddc3..9040d45fdd0a 100644 --- a/builtin/logical/pki/path_fetch_issuers.go +++ b/builtin/logical/pki/path_fetch_issuers.go @@ -75,11 +75,16 @@ their identifier and their name (if set). ) func pathGetIssuer(b *backend) *framework.Path { - pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "(/der|/pem|/json)?" + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "$" + return buildPathIssuer(b, pattern) +} + +func pathGetUnauthedIssuer(b *backend) *framework.Path { + pattern := "issuer/" + framework.GenericNameRegex(issuerRefParam) + "/(json|der|pem)$" return buildPathGetIssuer(b, pattern) } -func buildPathGetIssuer(b *backend, pattern string) *framework.Path { +func buildPathIssuer(b *backend, pattern string) *framework.Path { fields := map[string]*framework.FieldSchema{} fields = addIssuerRefNameFields(fields) @@ -180,6 +185,26 @@ to be set on all PR secondary clusters.`, } } +func buildPathGetIssuer(b *backend, pattern string) *framework.Path { + fields := map[string]*framework.FieldSchema{} + fields = addIssuerRefField(fields) + + return &framework.Path{ + // Returns a JSON entry. + Pattern: pattern, + Fields: fields, + + Operations: map[logical.Operation]framework.OperationHandler{ + logical.ReadOperation: &framework.PathOperation{ + Callback: b.pathGetIssuer, + }, + }, + + HelpSynopsis: pathGetIssuerHelpSyn, + HelpDescription: pathGetIssuerHelpDesc, + } +} + func (b *backend) pathGetIssuer(ctx context.Context, req *logical.Request, data *framework.FieldData) (*logical.Response, error) { // Handle raw issuers first. 
if strings.HasSuffix(req.Path, "/der") || strings.HasSuffix(req.Path, "/pem") || strings.HasSuffix(req.Path, "/json") { @@ -576,7 +601,7 @@ func (b *backend) pathPatchIssuer(ctx context.Context, req *logical.Request, dat } // Leaf Not After Changes - rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behaivor") + rawLeafBehaviorData, ok := data.GetOk("leaf_not_after_behavior") if ok { rawLeafBehavior := rawLeafBehaviorData.(string) var newLeafBehavior certutil.NotAfterBehavior diff --git a/builtin/logical/pki/path_ocsp.go b/builtin/logical/pki/path_ocsp.go index 24ea933ebca6..f62f1d808a0f 100644 --- a/builtin/logical/pki/path_ocsp.go +++ b/builtin/logical/pki/path_ocsp.go @@ -462,13 +462,19 @@ func genResponse(cfg *crlConfig, caBundle *certutil.ParsedCertBundle, info *ocsp revSigAlg = x509.SHA512WithRSA } + // Due to a bug in Go's ocsp.ParseResponse(...), we do not provision + // Certificate any more on the response to help Go based OCSP clients. + // This was technically unnecessary, as the Certificate given here + // both signed the OCSP response and issued the leaf cert, and so + // should already be trusted by the client. + // + // See also: https://github.com/golang/go/issues/59641 template := ocsp.Response{ IssuerHash: reqHash, Status: info.ocspStatus, SerialNumber: info.serialNumber, ThisUpdate: curTime, NextUpdate: curTime.Add(duration), - Certificate: caBundle.Certificate, ExtraExtensions: []pkix.Extension{}, SignatureAlgorithm: revSigAlg, } diff --git a/builtin/logical/pki/path_ocsp_test.go b/builtin/logical/pki/path_ocsp_test.go index 23209b10fac4..deda682d131f 100644 --- a/builtin/logical/pki/path_ocsp_test.go +++ b/builtin/logical/pki/path_ocsp_test.go @@ -359,7 +359,6 @@ func TestOcsp_MultipleMatchingIssuersOneWithoutSigningUsage(t *testing.T) { require.Equal(t, crypto.SHA1, ocspResp.IssuerHash) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) - require.Equal(t, rotatedCert, ocspResp.Certificate) requireOcspSignatureAlgoForKey(t, rotatedCert.SignatureAlgorithm, ocspResp.SignatureAlgorithm) requireOcspResponseSignedBy(t, ocspResp, rotatedCert) @@ -436,7 +435,6 @@ func TestOcsp_HigherLevel(t *testing.T) { require.NoError(t, err, "parsing ocsp get response") require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, issuerCert, ocspResp.Certificate) require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) // Test OCSP Get request for ocsp @@ -457,7 +455,6 @@ func TestOcsp_HigherLevel(t *testing.T) { require.NoError(t, err, "parsing ocsp get response") require.Equal(t, ocsp.Revoked, ocspResp.Status) - require.Equal(t, issuerCert, ocspResp.Certificate) require.Equal(t, certToRevoke.SerialNumber, ocspResp.SerialNumber) } @@ -521,7 +518,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Good, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) @@ -546,7 +542,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Revoked, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer1, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer1.SerialNumber, ocspResp.SerialNumber) @@ -566,7 
+561,6 @@ func runOcspRequestTest(t *testing.T, requestType string, caKeyType string, caKe require.Equal(t, ocsp.Good, ocspResp.Status) require.Equal(t, requestHash, ocspResp.IssuerHash) - require.Equal(t, testEnv.issuer2, ocspResp.Certificate) require.Equal(t, 0, ocspResp.RevocationReason) require.Equal(t, testEnv.leafCertIssuer2.SerialNumber, ocspResp.SerialNumber) diff --git a/builtin/logical/pki/path_revoke.go b/builtin/logical/pki/path_revoke.go index d488450aaa18..f35daefb50fc 100644 --- a/builtin/logical/pki/path_revoke.go +++ b/builtin/logical/pki/path_revoke.go @@ -12,6 +12,8 @@ import ( "strings" "time" + "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/framework" "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/hashicorp/vault/sdk/helper/errutil" @@ -490,6 +492,13 @@ func (b *backend) pathRevokeWrite(ctx context.Context, req *logical.Request, dat } } + // Assumption: this check is cheap. Call this twice, in the cert-import + // case, to allow cert verification to get rejected on the standby node, + // but we still need it to protect the serial number case. + if b.System().ReplicationState().HasState(consts.ReplicationPerformanceStandby) { + return nil, logical.ErrReadOnly + } + b.revokeStorageLock.Lock() defer b.revokeStorageLock.Unlock() diff --git a/builtin/logical/pki/periodic.go b/builtin/logical/pki/periodic.go index 606b3ae748dd..5b824efef8da 100644 --- a/builtin/logical/pki/periodic.go +++ b/builtin/logical/pki/periodic.go @@ -8,6 +8,7 @@ import ( "time" "github.com/hashicorp/vault/sdk/helper/consts" + "github.com/hashicorp/vault/sdk/logical" ) const ( @@ -87,7 +88,16 @@ func runUnifiedTransfer(sc *storageContext) { if err != nil { b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) status.forceRerun.Store(true) + } else { + if config.EnableDelta { + err = doUnifiedTransferMissingDeltaWALSerials(sc, clusterId) + if err != nil { + b.Logger().Error("an error occurred running unified transfer", "error", err.Error()) + status.forceRerun.Store(true) + } + } } + status.lastRun = time.Now() } @@ -119,7 +129,7 @@ func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string) err := readRevocationEntryAndTransfer(sc, serialNum) if err != nil { errCount++ - sc.Backend.Logger().Debug("Failed transferring local revocation to unified space", + sc.Backend.Logger().Error("Failed transferring local revocation to unified space", "serial", serialNum, "error", err) } } @@ -132,6 +142,152 @@ func doUnifiedTransferMissingLocalSerials(sc *storageContext, clusterId string) return nil } +func doUnifiedTransferMissingDeltaWALSerials(sc *storageContext, clusterId string) error { + // We need to do a similar thing for Delta WAL entry certificates. + // When the delta WAL failed to write for one or more entries, + // we'll need to replicate these up to the primary cluster. When it + // has performed a new delta WAL build, it will empty storage and + // update to a last written WAL entry that exceeds what we've seen + // locally. 
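+ // + // Rough flow: compare last-written WAL serials and entry counts between + // the local and unified spaces, copy up any missing per-serial WAL + // entries, then advance the cross-cluster last-revoked marker so the + // primary can detect them.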
+ thisUnifiedWALEntryPath := unifiedDeltaWALPath + deltaWALLastRevokedSerialName + lastUnifiedWALEntry, err := getLastWALSerial(sc, thisUnifiedWALEntryPath) + if err != nil { + return fmt.Errorf("failed to fetch last cross-cluster unified revoked delta WAL serial number: %w", err) + } + + lastLocalWALEntry, err := getLastWALSerial(sc, localDeltaWALLastRevokedSerial) + if err != nil { + return fmt.Errorf("failed to fetch last locally revoked delta WAL serial number: %w", err) + } + + // We now need to transfer all the entries and then write the last WAL + // entry at the end. Start by listing all certificates; any missing + // certificates will be copied over and then the WAL entry will be + // updated once. + // + // We do not delete entries either locally or remotely, as either + // cluster could've rebuilt delta CRLs with out-of-sync information, + // removing some entries (and, we cannot differentiate between these + // two cases). On next full CRL rebuild (on either cluster), the state + // should get synchronized, and future delta CRLs after this function + // returns without issue will see the remaining entries. + // + // Lastly, we need to ensure we don't accidentally write any unified + // delta WAL entries that aren't present in the main cross-cluster + // revoked storage location. This would mean the above function failed + // to copy them for some reason, despite them presumably appearing + // locally. + _unifiedWALEntries, err := sc.Storage.List(sc.Context, unifiedDeltaWALPath) + if err != nil { + return fmt.Errorf("failed to list cross-cluster unified delta WAL storage: %w", err) + } + unifiedWALEntries := sliceToMapKey(_unifiedWALEntries) + + _unifiedRevokedSerials, err := listClusterSpecificUnifiedRevokedCerts(sc, clusterId) + if err != nil { + return fmt.Errorf("failed to list cross-cluster revoked certificates: %w", err) + } + unifiedRevokedSerials := sliceToMapKey(_unifiedRevokedSerials) + + localWALEntries, err := sc.Storage.List(sc.Context, localDeltaWALPath) + if err != nil { + return fmt.Errorf("failed to list local delta WAL storage: %w", err) + } + + if lastUnifiedWALEntry == lastLocalWALEntry && len(_unifiedWALEntries) == len(localWALEntries) { + // Writing the last revoked WAL entry is the last thing that we do. + // Because these entries match (across clusters) and we have the same + // number of entries, assume we don't have anything to sync and exit + // early. + // + // We need both checks as, in the event of PBPWF failing and then + // returning while more revocations are happening, we could have + // been scheduled to run, but then skip running (if only the first + // condition was checked) because a later revocation succeeded + // in writing a unified WAL entry, before we started replicating + // the rest back up. + // + // The downside of this approach is that, if the main cluster + // does a full rebuild in the meantime, we could re-sync more + // entries back up to the primary cluster that are already + // included in the complete CRL. Users can manually rebuild the + // full CRL (clearing these duplicate delta CRL entries) if this + // affects them.
+ return nil + } + + errCount := 0 + for index, serial := range localWALEntries { + if index%25 == 0 { + config, _ := sc.Backend.crlBuilder.getConfigWithUpdate(sc) + if config != nil && (!config.UnifiedCRL || !config.EnableDelta) { + return errors.New("unified or delta CRLs have been disabled after we started, stopping") + } + } + + if serial == deltaWALLastBuildSerialName || serial == deltaWALLastRevokedSerialName { + // Skip our special serial numbers. + continue + } + + _, isAlreadyPresent := unifiedWALEntries[serial] + if isAlreadyPresent { + // Serial exists on both local and unified cluster. We're + // presuming we don't need to read and re-write these entries + // and that only missing entries need to be updated. + continue + } + + _, isRevokedCopied := unifiedRevokedSerials[serial] + if !isRevokedCopied { + // The corresponding cross-cluster revocation entry hasn't been + // copied up yet; skip this serial and retry it on a later run. + errCount += 1 + sc.Backend.Logger().Debug("Delta WAL exists locally, but corresponding cross-cluster full revocation entry is missing; skipping", "serial", serial) + continue + } + + // All good: read the local entry and write to the remote variant. + localPath := localDeltaWALPath + serial + unifiedPath := unifiedDeltaWALPath + serial + + entry, err := sc.Storage.Get(sc.Context, localPath) + if err != nil || entry == nil { + errCount += 1 + sc.Backend.Logger().Error("Failed reading local delta WAL entry to copy to cross-cluster", "serial", serial, "err", err) + continue + } + + entry.Key = unifiedPath + err = sc.Storage.Put(sc.Context, entry) + if err != nil { + errCount += 1 + sc.Backend.Logger().Error("Failed syncing local delta WAL entry to cross-cluster unified delta WAL location", "serial", serial, "err", err) + continue + } + } + + if errCount > 0 { + // See note above about why we don't fail here. + sc.Backend.Logger().Warn(fmt.Sprintf("Failed transferring %d local delta WAL serials to unified storage", errCount)) + return nil + } + + // Everything worked. Here, we can write over the delta WAL last revoked + // value. By using the earlier value, even if new revocations have + // occurred, we ensure any further missing entries can be handled in the + // next round. + lastRevSerial := lastWALInfo{Serial: lastLocalWALEntry} + lastWALEntry, err := logical.StorageEntryJSON(thisUnifiedWALEntryPath, lastRevSerial) + if err != nil { + return fmt.Errorf("unable to create cross-cluster unified last delta CRL WAL entry: %w", err) + } + if err = sc.Storage.Put(sc.Context, lastWALEntry); err != nil { + return fmt.Errorf("error saving cross-cluster unified last delta CRL WAL entry: %w", err) + } + + return nil +} + func readRevocationEntryAndTransfer(sc *storageContext, serial string) error { hyphenSerial := normalizeSerial(serial) revInfo, err := sc.fetchRevocationInfo(hyphenSerial) diff --git a/builtin/logical/pki/storage.go b/builtin/logical/pki/storage.go index f098f5835864..45fbf017fba0 100644 --- a/builtin/logical/pki/storage.go +++ b/builtin/logical/pki/storage.go @@ -706,7 +706,7 @@ func (sc *storageContext) upgradeIssuerIfRequired(issuer *issuerEntry) *issuerEn // Remove CRL signing usage if it exists on the issuer but doesn't // exist in the KU of the x509 certificate. if hadCRL && (cert.KeyUsage&x509.KeyUsageCRLSign) == 0 { - issuer.Usage.ToggleUsage(OCSPSigningUsage) + issuer.Usage.ToggleUsage(CRLSigningUsage) } // Handle our new OCSPSigning usage flag for earlier versions.
If we diff --git a/changelog/17848.txt b/changelog/17848.txt deleted file mode 100644 index 40579e4e184c..000000000000 --- a/changelog/17848.txt +++ /dev/null @@ -1,3 +0,0 @@ -```release-note:improvement -autopilot: Update version to v.0.2.0 to add better support for respecting min quorum -``` \ No newline at end of file diff --git a/changelog/18499.txt b/changelog/18499.txt new file mode 100644 index 000000000000..b329ed0db08b --- /dev/null +++ b/changelog/18499.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Added "Reissue" command which allows extracting fields from an existing certificate to create a new certificate. +``` \ No newline at end of file diff --git a/changelog/19018.txt b/changelog/19018.txt index 45ea82785e6c..bd79dbd15911 100644 --- a/changelog/19018.txt +++ b/changelog/19018.txt @@ -1,5 +1,5 @@ -```release-note:improvement -secrets/gcp: added support for impersonated accounts +```release-note:feature +**GCP Secrets Impersonated Account Support**: Add support for GCP service account impersonation, allowing callers to generate a GCP access token without requiring Vault to store or retrieve a GCP service account key for each role. ``` ```release-note:bug diff --git a/changelog/19032.txt b/changelog/19032.txt new file mode 100644 index 000000000000..a474c22ce6b7 --- /dev/null +++ b/changelog/19032.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/ldap: Add max_page_size configurable to LDAP configuration +``` diff --git a/changelog/19116.txt b/changelog/19116.txt new file mode 100644 index 000000000000..5dfcd9ecfada --- /dev/null +++ b/changelog/19116.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: Allows license-banners to be dismissed. Saves preferences in localStorage. +``` \ No newline at end of file diff --git a/changelog/19145.txt b/changelog/19145.txt new file mode 100644 index 000000000000..9cca8e85d634 --- /dev/null +++ b/changelog/19145.txt @@ -0,0 +1,4 @@ +```release-note:improvement +secrets/kv: Emit events on write if events system enabled +``` + diff --git a/changelog/19186.txt b/changelog/19186.txt new file mode 100644 index 000000000000..cb3b59a9f92c --- /dev/null +++ b/changelog/19186.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/approle: Add nil check for the secret ID entry when deleting via secret id accessor preventing cross role secret id deletion +``` diff --git a/changelog/19190.txt b/changelog/19190.txt new file mode 100644 index 000000000000..480006b1ebc8 --- /dev/null +++ b/changelog/19190.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: show Get credentials button for static roles detail page when a user has the proper permissions. +``` diff --git a/changelog/19194.txt b/changelog/19194.txt new file mode 100644 index 000000000000..b2a5ff383f12 --- /dev/null +++ b/changelog/19194.txt @@ -0,0 +1,4 @@ +```release-note:feature +**Event System (Alpha)**: Vault has a new opt-in experimental event system. Not yet suitable for production use. Events are currently only generated on writes to the KV secrets engine, but external plugins can also be updated to start generating events. +``` + diff --git a/changelog/19196.txt b/changelog/19196.txt new file mode 100644 index 000000000000..aab2638ceac7 --- /dev/null +++ b/changelog/19196.txt @@ -0,0 +1,5 @@ +```release-note:feature +**PKI Cross-Cluster Revocations**: Revocation information can now be +synchronized across primary and performance replica clusters offering +a unified CRL/OCSP view of revocations across cluster boundaries. 
+``` diff --git a/changelog/19216.txt b/changelog/19216.txt new file mode 100644 index 000000000000..e03e866e08b4 --- /dev/null +++ b/changelog/19216.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: adds allowed_response_headers as param for secret engine mount config +``` diff --git a/changelog/19220.txt b/changelog/19220.txt new file mode 100644 index 000000000000..cbfe7e5a9336 --- /dev/null +++ b/changelog/19220.txt @@ -0,0 +1,3 @@ +```release-note:improvement +ui: remove wizard +``` diff --git a/changelog/19230.txt b/changelog/19230.txt new file mode 100644 index 000000000000..ab2853d45edb --- /dev/null +++ b/changelog/19230.txt @@ -0,0 +1,4 @@ +```release-note:feature +**User Lockout**: Adds support to configure the user-lockout behaviour for failed logins to prevent +brute force attacks for userpass, approle and ldap auth methods. +``` \ No newline at end of file diff --git a/changelog/19244.txt b/changelog/19244.txt new file mode 100644 index 000000000000..63a663e9d6e4 --- /dev/null +++ b/changelog/19244.txt @@ -0,0 +1,4 @@ +```release-note:improvement +auth/github: Allow for an optional Github auth token environment variable to make authenticated requests when fetching org id +website/docs: Add docs for `VAULT_AUTH_CONFIG_GITHUB_TOKEN` environment variable when writing Github config +``` diff --git a/changelog/19265.txt b/changelog/19265.txt new file mode 100644 index 000000000000..23d957e2d594 --- /dev/null +++ b/changelog/19265.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Decode integer values properly in health-check configuration file +``` diff --git a/changelog/19269.txt b/changelog/19269.txt new file mode 100644 index 000000000000..57ff2072a18c --- /dev/null +++ b/changelog/19269.txt @@ -0,0 +1,3 @@ +```release-note:improvement +cli/pki: Change the pki health-check --list default config output to JSON so it's a usable configuration file +``` diff --git a/changelog/19274.txt b/changelog/19274.txt new file mode 100644 index 000000000000..a7f5d8c29293 --- /dev/null +++ b/changelog/19274.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Fix path for role health-check warning messages +``` diff --git a/changelog/19276.txt b/changelog/19276.txt new file mode 100644 index 000000000000..373199478f92 --- /dev/null +++ b/changelog/19276.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/pki: Properly report permission issues within health-check mount tune checks +``` diff --git a/changelog/19290.txt b/changelog/19290.txt new file mode 100644 index 000000000000..1a4511590c69 --- /dev/null +++ b/changelog/19290.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Remove `default` and add `default-service` and `default-batch` to UI token_type for auth mount and tuning. +``` diff --git a/changelog/19311.txt b/changelog/19311.txt new file mode 100644 index 000000000000..5ad6e2c01a81 --- /dev/null +++ b/changelog/19311.txt @@ -0,0 +1,3 @@ +```release-note:bug +server/config: Use file.Stat when checking file permissions when VAULT_ENABLE_FILE_PERMISSIONS_CHECK is enabled +``` diff --git a/changelog/19334.txt b/changelog/19334.txt new file mode 100644 index 000000000000..7df68268aabe --- /dev/null +++ b/changelog/19334.txt @@ -0,0 +1,3 @@ +```release-note:deprecation +secrets/ad: Marks the Active Directory (AD) secrets engine as deprecated. 
+``` \ No newline at end of file diff --git a/changelog/19373.txt b/changelog/19373.txt new file mode 100644 index 000000000000..87751805e7d8 --- /dev/null +++ b/changelog/19373.txt @@ -0,0 +1,3 @@ +```release-note:bug +cli/transit: Fix import, import-version command invocation +``` diff --git a/changelog/19428.txt b/changelog/19428.txt new file mode 100644 index 000000000000..c1ae6d54bbcb --- /dev/null +++ b/changelog/19428.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fixes crypto.randomUUID error in unsecure contexts from third party ember-data library +``` \ No newline at end of file diff --git a/changelog/19429.txt b/changelog/19429.txt new file mode 100644 index 000000000000..341fbf5a7347 --- /dev/null +++ b/changelog/19429.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: pass encodeBase64 param to HMAC transit-key-actions. +``` diff --git a/changelog/19448.txt b/changelog/19448.txt new file mode 100644 index 000000000000..8c75b79f140c --- /dev/null +++ b/changelog/19448.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes SSH engine config deletion +``` diff --git a/changelog/19460.txt b/changelog/19460.txt new file mode 100644 index 000000000000..6334c7fdc5d2 --- /dev/null +++ b/changelog/19460.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: use URLSearchParams interface to capture namespace param from SSOs (ex. ADFS) with decoded state param in callback url +``` diff --git a/changelog/19483.txt b/changelog/19483.txt new file mode 100644 index 000000000000..c7ba6f66d97d --- /dev/null +++ b/changelog/19483.txt @@ -0,0 +1,3 @@ +```release-note:bug +agent: Fix panic when SIGHUP is issued to Agent while it has a non-TLS listener. +``` diff --git a/changelog/19541.txt b/changelog/19541.txt new file mode 100644 index 000000000000..9bdecc35832d --- /dev/null +++ b/changelog/19541.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes oidc tabs in auth form submitting with the root's default_role value after a namespace has been inputted +``` diff --git a/changelog/19545.txt b/changelog/19545.txt new file mode 100644 index 000000000000..615742cd3265 --- /dev/null +++ b/changelog/19545.txt @@ -0,0 +1,3 @@ +```release-note:improvement +database/elasticsearch: Update error messages resulting from Elasticsearch API errors +``` \ No newline at end of file diff --git a/changelog/19585.txt b/changelog/19585.txt new file mode 100644 index 000000000000..f68c0dc6f603 --- /dev/null +++ b/changelog/19585.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fixed issue with remounting mounts that have a non-trailing space in the 'to' or 'from' paths. +``` diff --git a/changelog/19591.txt b/changelog/19591.txt new file mode 100644 index 000000000000..f15d3979ad12 --- /dev/null +++ b/changelog/19591.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core: validate name identifiers in mssql physical storage backend prior use +``` diff --git a/changelog/19593.txt b/changelog/19593.txt new file mode 100644 index 000000000000..8f170578ec07 --- /dev/null +++ b/changelog/19593.txt @@ -0,0 +1,4 @@ +```release-note:improvement +events: Suppress log warnings triggered when events are sent but the events system is not enabled. +``` + diff --git a/changelog/19600.txt b/changelog/19600.txt new file mode 100644 index 000000000000..f2c1f71fa027 --- /dev/null +++ b/changelog/19600.txt @@ -0,0 +1,3 @@ +```release-note:bug +openapi: Fix logic for labeling unauthenticated/sudo paths. 
+``` diff --git a/changelog/19624.txt b/changelog/19624.txt new file mode 100644 index 000000000000..7bc2df63ea85 --- /dev/null +++ b/changelog/19624.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix PKI revocation request forwarding from standby nodes due to an error wrapping bug +``` diff --git a/changelog/19640.txt b/changelog/19640.txt new file mode 100644 index 000000000000..8dcf59bf87fb --- /dev/null +++ b/changelog/19640.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/ldap: Invalidates WAL entry for static role if `password_policy` has changed. +``` diff --git a/changelog/19676.txt b/changelog/19676.txt new file mode 100644 index 000000000000..090dc801b2df --- /dev/null +++ b/changelog/19676.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Allow overriding gRPC connect timeout via VAULT_GRPC_MIN_CONNECT_TIMEOUT. This is an env var rather than a config setting because we don't expect this to ever be needed. It's being added as a last-ditch +option in case all else fails for some replication issues we may not have fully reproduced. +``` diff --git a/changelog/19703.txt b/changelog/19703.txt new file mode 100644 index 000000000000..6bf8e5c18989 --- /dev/null +++ b/changelog/19703.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes issue navigating back a level using the breadcrumb from secret metadata view +``` \ No newline at end of file diff --git a/changelog/19721.txt b/changelog/19721.txt new file mode 100644 index 000000000000..9818a0facfe2 --- /dev/null +++ b/changelog/19721.txt @@ -0,0 +1,3 @@ +```release-note:bug +core: Fix regression breaking non-raft clusters whose nodes share the same cluster_addr/api_addr. +``` \ No newline at end of file diff --git a/changelog/19799.txt b/changelog/19799.txt new file mode 100644 index 000000000000..aee76ca689aa --- /dev/null +++ b/changelog/19799.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix bad link to namespace when namespace name includes `.` +``` \ No newline at end of file diff --git a/changelog/19875.txt b/changelog/19875.txt new file mode 100644 index 000000000000..1167e39b3ee7 --- /dev/null +++ b/changelog/19875.txt @@ -0,0 +1,3 @@ +```release-note:bug +helper/random: Fix race condition in string generator helper +``` diff --git a/changelog/20019.txt b/changelog/20019.txt new file mode 100644 index 000000000000..0483d1763fae --- /dev/null +++ b/changelog/20019.txt @@ -0,0 +1,3 @@ +```release-note:improvement +core/activity: add an endpoint to write test activity log data, guarded by a build flag +``` \ No newline at end of file diff --git a/changelog/20034.txt b/changelog/20034.txt new file mode 100644 index 000000000000..c1050795bdc4 --- /dev/null +++ b/changelog/20034.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/aws: Revert changes that removed the lease on STS credentials, while leaving the new ttl field in place. +``` diff --git a/changelog/20044.txt b/changelog/20044.txt new file mode 100644 index 000000000000..014e61b46743 --- /dev/null +++ b/changelog/20044.txt @@ -0,0 +1,4 @@ +```release-note:improvement +core: Add a `raft` sub-field to the `storage` and `ha_storage` details provided by the +`/sys/config/state/sanitized` endpoint in order to include the `max_entry_size`. +``` diff --git a/changelog/20057.txt b/changelog/20057.txt new file mode 100644 index 000000000000..585a07d91b3a --- /dev/null +++ b/changelog/20057.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Ensure cross-cluster delta WAL write failures are only logged, to avoid unintended forwarding.
+``` diff --git a/changelog/20058.txt b/changelog/20058.txt new file mode 100644 index 000000000000..e43a1f4adf93 --- /dev/null +++ b/changelog/20058.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix building of unified delta CRLs and recovery during unified delta WAL write failures. +``` diff --git a/changelog/20064.txt b/changelog/20064.txt new file mode 100644 index 000000000000..c539119f713d --- /dev/null +++ b/changelog/20064.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes browser console formatting for help command output +``` \ No newline at end of file diff --git a/changelog/20070.txt b/changelog/20070.txt new file mode 100644 index 000000000000..34e6e5540d69 --- /dev/null +++ b/changelog/20070.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: fixes remaining doc links to include /vault in path +``` \ No newline at end of file diff --git a/changelog/20109.txt b/changelog/20109.txt new file mode 100644 index 000000000000..8c7cb3b32de1 --- /dev/null +++ b/changelog/20109.txt @@ -0,0 +1,3 @@ +```release-note:improvement +sys/wrapping: Add example how to unwrap without authentication in Vault +``` diff --git a/changelog/20144.txt b/changelog/20144.txt new file mode 100644 index 000000000000..ef8b9a01810c --- /dev/null +++ b/changelog/20144.txt @@ -0,0 +1,4 @@ +```release-note:improvement +sdk/ldaputil: added `connection_timeout` to tune connection timeout duration +for all LDAP plugins. +``` diff --git a/changelog/20154.txt b/changelog/20154.txt new file mode 100644 index 000000000000..7bda3624fba1 --- /dev/null +++ b/changelog/20154.txt @@ -0,0 +1,3 @@ +```release-note:bug +auth/cert: Include OCSP parameters in read CA certificate role response. +``` diff --git a/changelog/20181.txt b/changelog/20181.txt new file mode 100644 index 000000000000..121c869e4aaf --- /dev/null +++ b/changelog/20181.txt @@ -0,0 +1,4 @@ +```release-note:bug +sdk/helper/ocsp: Workaround bug in Go's ocsp.ParseResponse(...), causing validation to fail with embedded CA certificates. +auth/cert: Fix OCSP validation against Vault's PKI engine. +``` diff --git a/changelog/20201.txt b/changelog/20201.txt new file mode 100644 index 000000000000..d50c9bcb9da8 --- /dev/null +++ b/changelog/20201.txt @@ -0,0 +1,3 @@ +```release-note:improvement +secrets/pki: Decrease size and improve compatibility of OCSP responses by removing issuer certificate. +``` diff --git a/changelog/20216.txt b/changelog/20216.txt new file mode 100644 index 000000000000..59ee78c889e3 --- /dev/null +++ b/changelog/20216.txt @@ -0,0 +1,3 @@ +```release-note:bug +website/docs: Fix Kubernetes Auth Code Example to use the correct whitespace in import. +``` diff --git a/changelog/20220.txt b/changelog/20220.txt new file mode 100644 index 000000000000..1cf72aa81ceb --- /dev/null +++ b/changelog/20220.txt @@ -0,0 +1,3 @@ +```release-note:bug +pki: Fix automatically turning off CRL signing on upgrade to Vault >= 1.12, if CA Key Usage disallows it +``` diff --git a/changelog/20235.txt b/changelog/20235.txt new file mode 100644 index 000000000000..d1b9f8a6e923 --- /dev/null +++ b/changelog/20235.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: remove use of htmlSafe except when first sanitized +``` diff --git a/changelog/20243.txt b/changelog/20243.txt new file mode 100644 index 000000000000..8d5b04420b97 --- /dev/null +++ b/changelog/20243.txt @@ -0,0 +1,4 @@ +```release-note:improvement +cli/namespace: Add detailed flag to output additional namespace information +such as namespace IDs and custom metadata.
+``` diff --git a/changelog/20257.txt b/changelog/20257.txt new file mode 100644 index 000000000000..c2dba4579126 --- /dev/null +++ b/changelog/20257.txt @@ -0,0 +1,3 @@ +```release-note:bug +command/server: Fix incorrect paths in generated config for `-dev-tls` flag on Windows +``` diff --git a/changelog/20263.txt b/changelog/20263.txt new file mode 100644 index 000000000000..8556fe8865b3 --- /dev/null +++ b/changelog/20263.txt @@ -0,0 +1,3 @@ +```release-note:bug +ui: Fix OIDC provider logo showing when domain doesn't match +``` diff --git a/changelog/20294.txt b/changelog/20294.txt new file mode 100644 index 000000000000..92f7c291892b --- /dev/null +++ b/changelog/20294.txt @@ -0,0 +1,3 @@ +```release-note:improvement +Add debug symbols back to builds to fix Dynatrace support +``` diff --git a/changelog/20341.txt b/changelog/20341.txt new file mode 100644 index 000000000000..652e5735ea7b --- /dev/null +++ b/changelog/20341.txt @@ -0,0 +1,3 @@ +```release-note:bug +secrets/pki: Fix patching of leaf_not_after_behavior on issuers. +``` diff --git a/changelog/_go-ver-1130.txt b/changelog/_go-ver-1130.txt index c63e249c4588..d1b6a1489383 100644 --- a/changelog/_go-ver-1130.txt +++ b/changelog/_go-ver-1130.txt @@ -1,3 +1,3 @@ ```release-note:change -core: Bump Go version to 1.20. +core: Bump Go version to 1.20.1. ``` diff --git a/changelog/_go-ver-1132.txt b/changelog/_go-ver-1132.txt new file mode 100644 index 000000000000..48c63ff472ba --- /dev/null +++ b/changelog/_go-ver-1132.txt @@ -0,0 +1,3 @@ +```release-note:change +core: Bump Go version to 1.20.3. +``` diff --git a/command/agent.go b/command/agent.go index bf153c8529a6..6bc896de5109 100644 --- a/command/agent.go +++ b/command/agent.go @@ -1253,7 +1253,7 @@ func (c *AgentCommand) newLogger() (log.InterceptLogger, error) { } logCfg := &logging.LogConfig{ - Name: "vault-agent", + Name: "agent", LogLevel: logLevel, LogFormat: logFormat, LogFilePath: c.config.LogFile, @@ -1359,9 +1359,12 @@ func (c *AgentCommand) reloadCerts() error { defer c.tlsReloadFuncsLock.RUnlock() for _, reloadFunc := range c.tlsReloadFuncs { - err := reloadFunc() - if err != nil { - errors = multierror.Append(errors, err) + // Non-TLS listeners will have a nil reload func. + if reloadFunc != nil { + err := reloadFunc() + if err != nil { + errors = multierror.Append(errors, err) + } } } diff --git a/command/agent_test.go b/command/agent_test.go index 9cf4b4704852..0dbfaa0b2398 100644 --- a/command/agent_test.go +++ b/command/agent_test.go @@ -2428,6 +2428,75 @@ func TestAgent_Config_ReloadTls(t *testing.T) { wg.Wait() } +// TestAgent_NonTLSListener_SIGHUP tests giving a SIGHUP signal to a listener +// without a TLS configuration. Prior to fixing GitHub issue #19480, this +// would cause a panic. 
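The reloadCerts hunk above now skips nil entries in the reload-func slice, since listeners without TLS register no reload function; the test that follows exercises the same path end to end via SIGHUP. As a standalone sketch of that guard pattern (hypothetical helper names, not Vault's actual types, assuming hashicorp/go-multierror as the diff does):

```go
package main

import (
	"errors"
	"fmt"

	"github.com/hashicorp/go-multierror"
)

// reloadAll mirrors the guarded loop from reloadCerts above: non-TLS
// listeners contribute a nil reload func, so each entry is nil-checked
// before being invoked to avoid a panic on SIGHUP.
func reloadAll(reloadFuncs []func() error) error {
	var errs *multierror.Error
	for _, reloadFunc := range reloadFuncs {
		if reloadFunc == nil {
			continue // non-TLS listener: nothing to reload
		}
		if err := reloadFunc(); err != nil {
			errs = multierror.Append(errs, err)
		}
	}
	return errs.ErrorOrNil()
}

func main() {
	funcs := []func() error{
		nil, // e.g. a tcp listener with tls_disable = true
		func() error { return nil },
		func() error { return errors.New("cert reload failed") },
	}
	fmt.Println(reloadAll(funcs)) // prints the one collected reload error
}
```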
+func TestAgent_NonTLSListener_SIGHUP(t *testing.T) { + logger := logging.NewVaultLogger(hclog.Trace) + cluster := vault.NewTestCluster(t, nil, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + cluster.Start() + defer cluster.Cleanup() + + serverClient := cluster.Cores[0].Client + + // Unset the environment variable so that agent picks up the right test + // cluster address + defer os.Setenv(api.EnvVaultAddress, os.Getenv(api.EnvVaultAddress)) + os.Unsetenv(api.EnvVaultAddress) + + listenAddr := generateListenerAddress(t) + listenConfig := fmt.Sprintf(` +listener "tcp" { + address = "%s" + tls_disable = true +} +`, listenAddr) + + config := fmt.Sprintf(` +vault { + address = "%s" + tls_skip_verify = true +} +%s +`, serverClient.Address(), listenConfig) + configPath := makeTempFile(t, "config.hcl", config) + defer os.Remove(configPath) + + // Start the agent + ui, cmd := testAgentCommand(t, logger) + + cmd.startedCh = make(chan struct{}) + + wg := &sync.WaitGroup{} + wg.Add(1) + go func() { + if code := cmd.Run([]string{"-config", configPath}); code != 0 { + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + t.Errorf("got a non-zero exit status: %s", output) + } + wg.Done() + }() + + select { + case <-cmd.startedCh: + case <-time.After(5 * time.Second): + t.Errorf("timeout") + } + + // Reload + cmd.SighupCh <- struct{}{} + select { + case <-cmd.reloadedCh: + case <-time.After(5 * time.Second): + t.Fatalf("timeout") + } + + close(cmd.ShutdownCh) + wg.Wait() +} + // Get a randomly assigned port and then free it again before returning it. // There is still a race when trying to use it, but should work better // than a static port. diff --git a/command/commands.go b/command/commands.go index 2938ee1bb902..21da8141bc54 100644 --- a/command/commands.go +++ b/command/commands.go @@ -555,6 +555,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "pki reissue": func() (cli.Command, error) { + return &PKIReIssueCACommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "pki verify-sign": func() (cli.Command, error) { return &PKIVerifySignCommand{ BaseCommand: getBaseCommand(), @@ -699,6 +704,11 @@ func initCommands(ui, serverCmdUi cli.Ui, runOpts *RunOptions) map[string]cli.Co BaseCommand: getBaseCommand(), }, nil }, + "transit": func() (cli.Command, error) { + return &TransitCommand{ + BaseCommand: getBaseCommand(), + }, nil + }, "transit import": func() (cli.Command, error) { return &TransitImportCommand{ BaseCommand: getBaseCommand(), diff --git a/command/debug_test.go b/command/debug_test.go index 046474af8660..de51c770f5ac 100644 --- a/command/debug_test.go +++ b/command/debug_test.go @@ -534,6 +534,10 @@ func TestDebugCommand_NoConnection(t *testing.T) { t.Fatal(err) } + if err := client.SetAddress(""); err != nil { + t.Fatal(err) + } + _, cmd := testDebugCommand(t) cmd.client = client cmd.skipTimingChecks = true diff --git a/command/healthcheck/healthcheck.go b/command/healthcheck/healthcheck.go index 1949de04e57b..2ce9c2deea5d 100644 --- a/command/healthcheck/healthcheck.go +++ b/command/healthcheck/healthcheck.go @@ -119,6 +119,10 @@ func (e *Executor) Execute() (map[string][]*Result, error) { return nil, fmt.Errorf("failed to evaluate %v: %w", checker.Name(), err) } + if results == nil { + results = []*Result{} + } + for _, result := range results { result.Endpoint = e.templatePath(result.Endpoint) result.StatusDisplay = ResultStatusNameMap[result.Status] diff --git 
a/command/healthcheck/pki.go b/command/healthcheck/pki.go index edec1523c4b6..406163b355cb 100644 --- a/command/healthcheck/pki.go +++ b/command/healthcheck/pki.go @@ -47,7 +47,7 @@ func parsePEM(contents string) ([]byte, error) { return pemBlock.Bytes, nil } -func parsePEMCert(contents string) (*x509.Certificate, error) { +func ParsePEMCert(contents string) (*x509.Certificate, error) { parsed, err := parsePEM(contents) if err != nil { return nil, err @@ -89,7 +89,7 @@ func pkiFetchIssuer(e *Executor, issuer string, versionError func()) (bool, *Pat } if len(issuerRet.ParsedCache) == 0 { - cert, err := parsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) if err != nil { return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) } @@ -114,7 +114,7 @@ func pkiFetchIssuerEntry(e *Executor, issuer string, versionError func()) (bool, } if len(issuerRet.ParsedCache) == 0 { - cert, err := parsePEMCert(issuerRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(issuerRet.Secret.Data["certificate"].(string)) if err != nil { return true, issuerRet, nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuer, err) } @@ -222,7 +222,7 @@ func pkiFetchLeaf(e *Executor, serial string, versionError func()) (bool, *PathF } if len(leafRet.ParsedCache) == 0 { - cert, err := parsePEMCert(leafRet.Secret.Data["certificate"].(string)) + cert, err := ParsePEMCert(leafRet.Secret.Data["certificate"].(string)) if err != nil { return true, leafRet, nil, fmt.Errorf("unable to parse leaf %v's certificate: %w", serial, err) } diff --git a/command/healthcheck/pki_allow_if_modified_since.go b/command/healthcheck/pki_allow_if_modified_since.go index 59f96611b118..1cff1cda5d58 100644 --- a/command/healthcheck/pki_allow_if_modified_since.go +++ b/command/healthcheck/pki_allow_if_modified_since.go @@ -12,6 +12,7 @@ type AllowIfModifiedSince struct { UnsupportedVersion bool TuneData map[string]interface{} + Fetcher *PathFetch } func NewAllowIfModifiedSinceCheck() Check { @@ -42,15 +43,16 @@ func (h *AllowIfModifiedSince) LoadConfig(config map[string]interface{}) error { } func (h *AllowIfModifiedSince) FetchResources(e *Executor) error { - exit, _, data, err := fetchMountTune(e, func() { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { h.UnsupportedVersion = true }) - if exit { + + if exit || err != nil { return err } - - h.TuneData = data - return nil } @@ -59,22 +61,39 @@ func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err err ret := Result{ Status: ResultInvalidVersion, Endpoint: "/sys/mounts/{{mount}}/tune", - Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + Message: "This health check requires Vault 1.12+ but an earlier version of Vault Server was contacted, preventing this health check from running.", } return []*Result{&ret}, nil } - req, err := stringList(h.TuneData["passthrough_request_headers"]) + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the tune endpoint for this mount. 
" + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + return + } + + req, err := StringList(h.TuneData["passthrough_request_headers"]) if err != nil { return nil, fmt.Errorf("unable to parse value from server for passthrough_request_headers: %w", err) } - resp, err := stringList(h.TuneData["allowed_response_headers"]) + resp, err := StringList(h.TuneData["allowed_response_headers"]) if err != nil { return nil, fmt.Errorf("unable to parse value from server for allowed_response_headers: %w", err) } - var foundIMS bool = false + foundIMS := false for _, param := range req { if strings.EqualFold(param, "If-Modified-Since") { foundIMS = true @@ -82,7 +101,7 @@ func (h *AllowIfModifiedSince) Evaluate(e *Executor) (results []*Result, err err } } - var foundLM bool = false + foundLM := false for _, param := range resp { if strings.EqualFold(param, "Last-Modified") { foundLM = true diff --git a/command/healthcheck/pki_audit_visibility.go b/command/healthcheck/pki_audit_visibility.go index 24f9be4f6a1f..1984fb97d7ad 100644 --- a/command/healthcheck/pki_audit_visibility.go +++ b/command/healthcheck/pki_audit_visibility.go @@ -58,6 +58,7 @@ type AuditVisibility struct { IgnoredParameters map[string]bool TuneData map[string]interface{} + Fetcher *PathFetch } func NewAuditVisibilityCheck() Check { @@ -83,7 +84,7 @@ func (h *AuditVisibility) DefaultConfig() map[string]interface{} { func (h *AuditVisibility) LoadConfig(config map[string]interface{}) error { var err error - coerced, err := stringList(config["ignored_parameters"]) + coerced, err := StringList(config["ignored_parameters"]) if err != nil { return fmt.Errorf("error parsing %v.ignored_parameters: %v", h.Name(), err) } @@ -100,35 +101,52 @@ func (h *AuditVisibility) LoadConfig(config map[string]interface{}) error { } func (h *AuditVisibility) FetchResources(e *Executor) error { - exit, _, data, err := fetchMountTune(e, func() { + var exit bool + var err error + + exit, h.Fetcher, h.TuneData, err = fetchMountTune(e, func() { h.UnsupportedVersion = true }) - if exit { + + if exit || err != nil { return err } - - h.TuneData = data - return nil } func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { if h.UnsupportedVersion { - // Shouldn't happen; /certs has been around forever. ret := Result{ Status: ResultInvalidVersion, - Endpoint: "/{{mount}}/certs", - Message: "This health check requires Vault 1.11+ but an earlier version of Vault Server was contacted, preventing this health check from running.", + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "This health check requires Vault 1.9+ but an earlier version of Vault Server was contacted, preventing this health check from running.", } return []*Result{&ret}, nil } + if h.Fetcher.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: "/sys/mounts/{{mount}}/tune", + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the tune endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to read the tune endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + return + } + sourceMap := map[string][]string{ "audit_non_hmac_request_keys": VisibleReqParams, "audit_non_hmac_response_keys": VisibleRespParams, } for source, visibleList := range sourceMap { - actual, err := stringList(h.TuneData[source]) + actual, err := StringList(h.TuneData[source]) if err != nil { return nil, fmt.Errorf("error parsing %v from server: %v", source, err) } @@ -158,7 +176,7 @@ func (h *AuditVisibility) Evaluate(e *Executor) (results []*Result, err error) { "audit_non_hmac_response_keys": HiddenRespParams, } for source, hiddenList := range sourceMap { - actual, err := stringList(h.TuneData[source]) + actual, err := StringList(h.TuneData[source]) if err != nil { return nil, fmt.Errorf("error parsing %v from server: %v", source, err) } diff --git a/command/healthcheck/pki_hardware_backed_root.go b/command/healthcheck/pki_hardware_backed_root.go index 199a781fe9bb..89d3550eaf9b 100644 --- a/command/healthcheck/pki_hardware_backed_root.go +++ b/command/healthcheck/pki_hardware_backed_root.go @@ -13,12 +13,14 @@ type HardwareBackedRoot struct { UnsupportedVersion bool + FetchIssues map[string]*PathFetch IssuerKeyMap map[string]string KeyIsManaged map[string]string } func NewHardwareBackedRootCheck() Check { return &HardwareBackedRoot{ + FetchIssues: make(map[string]*PathFetch), IssuerKeyMap: make(map[string]string), KeyIsManaged: make(map[string]string), } @@ -64,6 +66,7 @@ func (h *HardwareBackedRoot) FetchResources(e *Executor) error { if err != nil { return err } + h.FetchIssues[issuer] = ret continue } @@ -83,13 +86,15 @@ func (h *HardwareBackedRoot) FetchResources(e *Executor) error { } h.IssuerKeyMap[issuer] = keyId - skip, _, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() { + skip, ret, keyEntry, err := pkiFetchKeyEntry(e, keyId, func() { h.UnsupportedVersion = true }) if skip || err != nil || keyEntry == nil { if err != nil { return err } + + h.FetchIssues[issuer] = ret continue } @@ -112,6 +117,25 @@ func (h *HardwareBackedRoot) Evaluate(e *Executor) (results []*Result, err error return []*Result{&ret}, nil } + for issuer, fetchPath := range h.FetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.IssuerKeyMap, issuer) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + } + } + for name, keyId := range h.IssuerKeyMap { var ret Result ret.Status = ResultInformational diff --git a/command/healthcheck/pki_role_allows_glob_wildcards.go b/command/healthcheck/pki_role_allows_glob_wildcards.go index c78fad8653c3..34fb09927ce5 100644 --- a/command/healthcheck/pki_role_allows_glob_wildcards.go +++ b/command/healthcheck/pki_role_allows_glob_wildcards.go @@ -10,14 +10,16 @@ import ( type RoleAllowsGlobWildcards struct { Enabled bool UnsupportedVersion bool - NoPerms bool - RoleEntryMap map[string]map[string]interface{} + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} } func NewRoleAllowsGlobWildcardsCheck() Check { return &RoleAllowsGlobWildcards{ - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -49,7 +51,7 @@ func (h *RoleAllowsGlobWildcards) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -60,7 +62,7 @@ func (h *RoleAllowsGlobWildcards) FetchResources(e *Executor) error { }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -84,18 +86,37 @@ func (h *RoleAllowsGlobWildcards) Evaluate(e *Executor) (results []*Result, err } return []*Result{&ret}, nil } - if h.NoPerms { + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check.", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission to list the roles. This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } } for role, entry := range h.RoleEntryMap { @@ -141,7 +162,7 @@ func (h *RoleAllowsGlobWildcards) Evaluate(e *Executor) (results []*Result, err ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: fmt.Sprintf("Role currently allows wildcard issuance while allowing globs in allowed_domains (%v). Because globs can expand to one or more wildcard characters, including wildcards under additional subdomains, these options are dangerous to enable together. 
If glob domains are required to be enabled, it is suggested to either disable wildcard issuance if not desired, or create two separate roles -- one with wildcard issuance for specified domains and one with glob matching enabled for concrete domain identifiers.", allowedDomains), } diff --git a/command/healthcheck/pki_role_allows_localhost.go b/command/healthcheck/pki_role_allows_localhost.go index 570ffdf90651..568aa3a5f857 100644 --- a/command/healthcheck/pki_role_allows_localhost.go +++ b/command/healthcheck/pki_role_allows_localhost.go @@ -9,14 +9,16 @@ import ( type RoleAllowsLocalhost struct { Enabled bool UnsupportedVersion bool - NoPerms bool - RoleEntryMap map[string]map[string]interface{} + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} } func NewRoleAllowsLocalhostCheck() Check { return &RoleAllowsLocalhost{ - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -48,7 +50,7 @@ func (h *RoleAllowsLocalhost) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -59,7 +61,7 @@ func (h *RoleAllowsLocalhost) FetchResources(e *Executor) error { }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -83,18 +85,38 @@ func (h *RoleAllowsLocalhost) Evaluate(e *Executor) (results []*Result, err erro } return []*Result{&ret}, nil } - if h.NoPerms { + + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission to list the roles. This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. " + ret.Message + } + + results = append(results, &ret) + } } for role, entry := range h.RoleEntryMap { @@ -115,7 +137,7 @@ func (h *RoleAllowsLocalhost) Evaluate(e *Executor) (results []*Result, err erro ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: fmt.Sprintf("Role currently allows localhost issuance with a non-empty allowed_domains (%v): this role is intended for issuing other hostnames and the allow_localhost=true option may be overlooked by operators. 
If this role is intended to issue certificates valid for localhost, consider setting allow_localhost=false and explicitly adding localhost to the list of allowed domains.", allowedDomains), } diff --git a/command/healthcheck/pki_role_no_store_false.go b/command/healthcheck/pki_role_no_store_false.go index 6e13e222d2cf..4fa7ba5ac68d 100644 --- a/command/healthcheck/pki_role_no_store_false.go +++ b/command/healthcheck/pki_role_no_store_false.go @@ -11,19 +11,20 @@ import ( type RoleNoStoreFalse struct { Enabled bool UnsupportedVersion bool - NoPerms bool AllowedRoles map[string]bool - CertCounts int - RoleEntryMap map[string]map[string]interface{} - CRLConfig *PathFetch + RoleListFetchIssue *PathFetch + RoleFetchIssues map[string]*PathFetch + RoleEntryMap map[string]map[string]interface{} + CRLConfig *PathFetch } func NewRoleNoStoreFalseCheck() Check { return &RoleNoStoreFalse{ - AllowedRoles: make(map[string]bool), - RoleEntryMap: make(map[string]map[string]interface{}), + RoleFetchIssues: make(map[string]*PathFetch), + AllowedRoles: make(map[string]bool), + RoleEntryMap: make(map[string]map[string]interface{}), } } @@ -64,7 +65,7 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { }) if exit || err != nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleListFetchIssue = f } return err } @@ -75,7 +76,7 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { }) if skip || err != nil || entry == nil { if f != nil && f.IsSecretPermissionsError() { - h.NoPerms = true + h.RoleFetchIssues[role] = f } if err != nil { return err @@ -86,14 +87,6 @@ func (h *RoleNoStoreFalse) FetchResources(e *Executor) error { h.RoleEntryMap[role] = entry } - exit, _, leaves, err := pkiFetchLeavesList(e, func() { - h.UnsupportedVersion = true - }) - if exit || err != nil { - return err - } - h.CertCounts = len(leaves) - // Check if the issuer is fetched yet. configRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/{{mount}}/config/crl") if err != nil { @@ -116,18 +109,37 @@ func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) return []*Result{&ret}, nil } - if h.NoPerms { + if h.RoleListFetchIssue != nil && h.RoleListFetchIssue.IsSecretPermissionsError() { ret := Result{ Status: ResultInsufficientPermissions, - Endpoint: "/{{mount}}/roles", - Message: "lacks permission either to list the roles or to read a specific role. This may restrict the ability to fully execute this health check", + Endpoint: h.RoleListFetchIssue.Path, + Message: "lacks permission to list the roles. This restricts the ability to fully execute this health check.", } if e.Client.Token() == "" { ret.Message = "No token available and so this health check " + ret.Message } else { ret.Message = "This token " + ret.Message } - results = append(results, &ret) + return []*Result{&ret}, nil + } + + for role, fetchPath := range h.RoleFetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RoleEntryMap, role) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + } } crlAutoRebuild := false @@ -159,7 +171,7 @@ func (h *RoleNoStoreFalse) Evaluate(e *Executor) (results []*Result, err error) ret := Result{ Status: ResultWarning, - Endpoint: "/{{mount}}/role/" + role, + Endpoint: "/{{mount}}/roles/" + role, Message: "Role currently stores every issued certificate (no_store=false). Too many issued and/or revoked certificates can exceed Vault's storage limits and make operations slow. It is encouraged to enable auto-rebuild of CRLs to prevent every revocation from creating a new CRL, and to limit the number of certificates issued under roles with no_store=false: use shorter lifetimes and/or BYOC revocation instead.", } diff --git a/command/healthcheck/pki_root_issued_leaves.go b/command/healthcheck/pki_root_issued_leaves.go index 3252a91fb041..e858794b621c 100644 --- a/command/healthcheck/pki_root_issued_leaves.go +++ b/command/healthcheck/pki_root_issued_leaves.go @@ -14,12 +14,14 @@ type RootIssuedLeaves struct { CertsToFetch int + FetchIssues map[string]*PathFetch RootCertMap map[string]*x509.Certificate LeafCertMap map[string]*x509.Certificate } func NewRootIssuedLeavesCheck() Check { return &RootIssuedLeaves{ + FetchIssues: make(map[string]*PathFetch), RootCertMap: make(map[string]*x509.Certificate), LeafCertMap: make(map[string]*x509.Certificate), } @@ -64,9 +66,10 @@ func (h *RootIssuedLeaves) FetchResources(e *Executor) error { } for _, issuer := range issuers { - skip, _, cert, err := pkiFetchIssuer(e, issuer, func() { + skip, pathFetch, cert, err := pkiFetchIssuer(e, issuer, func() { h.UnsupportedVersion = true }) + h.FetchIssues[issuer] = pathFetch if skip || err != nil { if err != nil { return err @@ -85,10 +88,15 @@ func (h *RootIssuedLeaves) FetchResources(e *Executor) error { h.RootCertMap[issuer] = cert } - exit, _, leaves, err := pkiFetchLeavesList(e, func() { + exit, f, leaves, err := pkiFetchLeavesList(e, func() { h.UnsupportedVersion = true }) if exit || err != nil { + if f != nil && f.IsSecretPermissionsError() { + for _, issuer := range issuers { + h.FetchIssues[issuer] = f + } + } return err } @@ -130,6 +138,25 @@ func (h *RootIssuedLeaves) Evaluate(e *Executor) (results []*Result, err error) return []*Result{&ret}, nil } + for issuer, fetchPath := range h.FetchIssues { + if fetchPath != nil && fetchPath.IsSecretPermissionsError() { + delete(h.RootCertMap, issuer) + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: fetchPath.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to read the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission for the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + } + } + issuerHasLeaf := make(map[string]bool) for serial, leaf := range h.LeafCertMap { if len(issuerHasLeaf) == len(h.RootCertMap) { diff --git a/command/healthcheck/pki_tidy_last_run.go b/command/healthcheck/pki_tidy_last_run.go index e079212333d9..6fed74d33964 100644 --- a/command/healthcheck/pki_tidy_last_run.go +++ b/command/healthcheck/pki_tidy_last_run.go @@ -93,7 +93,7 @@ func (h *TidyLastRun) Evaluate(e *Executor) (results []*Result, err error) { ret := Result{ Status: ResultInsufficientPermissions, Endpoint: "/{{mount}}/tidy-status", - Message: "Without this information, this health check is unable tof unction.", + Message: "Without this information, this health check is unable to function.", } if e.Client.Token() == "" { diff --git a/command/healthcheck/pki_too_many_certs.go b/command/healthcheck/pki_too_many_certs.go index 8bd61003bceb..6b07b5dfe38c 100644 --- a/command/healthcheck/pki_too_many_certs.go +++ b/command/healthcheck/pki_too_many_certs.go @@ -14,6 +14,7 @@ type TooManyCerts struct { CountWarning int CertCounts int + FetchIssue *PathFetch } func NewTooManyCertsCheck() Check { @@ -60,7 +61,9 @@ func (h *TooManyCerts) FetchResources(e *Executor) error { exit, leavesRet, _, err := pkiFetchLeavesList(e, func() { h.UnsupportedVersion = true }) - if exit { + h.FetchIssue = leavesRet + + if exit || err != nil { return err } @@ -80,6 +83,23 @@ func (h *TooManyCerts) Evaluate(e *Executor) (results []*Result, err error) { return []*Result{&ret}, nil } + if h.FetchIssue != nil && h.FetchIssue.IsSecretPermissionsError() { + ret := Result{ + Status: ResultInsufficientPermissions, + Endpoint: h.FetchIssue.Path, + Message: "Without this information, this health check is unable to function.", + } + + if e.Client.Token() == "" { + ret.Message = "No token available so unable to list the endpoint for this mount. " + ret.Message + } else { + ret.Message = "This token lacks permission to list the endpoint for this mount. 
" + ret.Message + } + + results = append(results, &ret) + return + } + ret := Result{ Status: ResultOK, Endpoint: "/{{mount}}/certs", diff --git a/command/healthcheck/shared.go b/command/healthcheck/shared.go index e9d6a5a9964e..9f2b05051766 100644 --- a/command/healthcheck/shared.go +++ b/command/healthcheck/shared.go @@ -6,7 +6,7 @@ import ( "github.com/hashicorp/vault/sdk/logical" ) -func stringList(source interface{}) ([]string, error) { +func StringList(source interface{}) ([]string, error) { if source == nil { return nil, nil } @@ -35,7 +35,7 @@ func stringList(source interface{}) ([]string, error) { func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[string]interface{}, error) { tuneRet, err := e.FetchIfNotFetched(logical.ReadOperation, "/sys/mounts/{{mount}}/tune") if err != nil { - return true, nil, nil, err + return true, nil, nil, fmt.Errorf("failed to fetch mount tune information: %w", err) } if !tuneRet.IsSecretOK() { @@ -43,7 +43,7 @@ func fetchMountTune(e *Executor, versionError func()) (bool, *PathFetch, map[str versionError() } - return true, nil, nil, nil + return true, tuneRet, nil, nil } var data map[string]interface{} = nil diff --git a/command/namespace_list.go b/command/namespace_list.go index 605c4e32e28f..2be2a3874df5 100644 --- a/command/namespace_list.go +++ b/command/namespace_list.go @@ -37,7 +37,18 @@ Usage: vault namespace list [options] } func (c *NamespaceListCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + + f := set.NewFlagSet("Command Options") + + f.BoolVar(&BoolVar{ + Name: "detailed", + Target: &c.flagDetailed, + Default: false, + Usage: "Print detailed information such as namespace ID.", + }) + + return set } func (c *NamespaceListCommand) AutocompleteArgs() complete.Predictor { @@ -101,5 +112,9 @@ func (c *NamespaceListCommand) Run(args []string) int { return 2 } + if c.flagDetailed && Format(c.UI) != "table" { + return OutputData(c.UI, secret.Data["key_info"]) + } + return OutputList(c.UI, secret) } diff --git a/command/pki.go b/command/pki.go index 4212ee6f86ab..8ae5eae4a64e 100644 --- a/command/pki.go +++ b/command/pki.go @@ -13,7 +13,7 @@ type PKICommand struct { } func (c *PKICommand) Synopsis() string { - return "Interact with Vault's Key-Value storage" + return "Interact with Vault's PKI Secrets Engine" } func (c *PKICommand) Help() string { diff --git a/command/pki_health_check.go b/command/pki_health_check.go index c47adb2148b5..8c56a2c1f0ed 100644 --- a/command/pki_health_check.go +++ b/command/pki_health_check.go @@ -135,7 +135,7 @@ default unless enabled by the configuration file explicitly.`, Default: false, EnvVar: "", Usage: `When specified, no health checks are run, but all known health -checks are printed. 
Still requires a positional mount argument.`, +checks are printed.`, }) return set @@ -170,10 +170,10 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { } args = f.Args() - if len(args) < 1 { + if !c.flagList && len(args) < 1 { c.UI.Error("Not enough arguments (expected mount path, got nothing)") return pkiRetUsage - } else if len(args) > 1 { + } else if !c.flagList && len(args) > 1 { c.UI.Error(fmt.Sprintf("Too many arguments (expected only mount path, got %d arguments)", len(args))) for _, arg := range args { if strings.HasPrefix(arg, "-") { @@ -196,7 +196,14 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { return pkiRetUsage } - mount := sanitizePath(args[0]) + // When listing is enabled, we lack an argument here, but do not contact + // the server at all, so we're safe to use a hard-coded default here. + pkiPath := "" + if len(args) == 1 { + pkiPath = args[0] + } + + mount := sanitizePath(pkiPath) executor := healthcheck.NewExecutor(client, mount) executor.AddCheck(healthcheck.NewCAValidityPeriodCheck()) executor.AddCheck(healthcheck.NewCRLValidityPeriodCheck()) @@ -216,33 +223,43 @@ func (c *PKIHealthCheckCommand) Run(args []string) int { // Handle listing, if necessary. if c.flagList { - c.UI.Output("Health Checks:") + uiFormat := Format(c.UI) + if uiFormat == "yaml" { + c.UI.Error("YAML output format is not supported by the --list command") + return pkiRetUsage + } + + if uiFormat != "json" { + c.UI.Output("Default health check config:") + } + config := map[string]map[string]interface{}{} for _, checker := range executor.Checkers { - c.UI.Output(" - " + checker.Name()) - - prefix := " " - cfg := checker.DefaultConfig() - marshaled, err := json.MarshalIndent(cfg, prefix, " ") - if err != nil { - c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) - return pkiRetUsage - } - c.UI.Output(prefix + string(marshaled)) + config[checker.Name()] = checker.DefaultConfig() + } + + marshaled, err := json.MarshalIndent(config, "", " ") + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to marshal default config for check: %v", err)) + return pkiRetUsage } + c.UI.Output(string(marshaled)) return pkiRetOK } // Handle config merging. external_config := map[string]interface{}{} if c.flagConfig != "" { - contents, err := os.ReadFile(c.flagConfig) + contents, err := os.Open(c.flagConfig) if err != nil { c.UI.Error(fmt.Sprintf("Failed to read configuration file %v: %v", c.flagConfig, err)) return pkiRetUsage } - if err := json.Unmarshal(contents, &external_config); err != nil { + decoder := json.NewDecoder(contents) + decoder.UseNumber() // Use json.Number instead of float64 values as we are decoding to an interface{}. 
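Switching from json.Unmarshal to a json.Decoder with UseNumber, as above, keeps numeric values in the decoded interface{} as json.Number rather than float64, so large integer settings in the health-check config survive without rounding. A self-contained sketch of the difference (the config key here is illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"
	"strings"
)

func main() {
	raw := `{"count_warning": 9007199254740993}` // > 2^53: not exactly representable as float64

	// Plain Unmarshal into interface{} yields float64 and silently rounds.
	var viaUnmarshal map[string]interface{}
	_ = json.Unmarshal([]byte(raw), &viaUnmarshal)
	fmt.Printf("%T %v\n", viaUnmarshal["count_warning"], viaUnmarshal["count_warning"])

	// A Decoder with UseNumber keeps the literal as json.Number.
	decoder := json.NewDecoder(strings.NewReader(raw))
	decoder.UseNumber()
	var viaDecoder map[string]interface{}
	_ = decoder.Decode(&viaDecoder)
	n := viaDecoder["count_warning"].(json.Number)
	i, _ := n.Int64()
	fmt.Printf("%T %v (Int64: %d)\n", n, n, i)
}
```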
+ + if err := decoder.Decode(&external_config); err != nil { c.UI.Error(fmt.Sprintf("Failed to parse configuration file %v: %v", c.flagConfig, err)) return pkiRetUsage } diff --git a/command/pki_health_check_test.go b/command/pki_health_check_test.go index bdd491a0497d..af3cf337a468 100644 --- a/command/pki_health_check_test.go +++ b/command/pki_health_check_test.go @@ -271,6 +271,8 @@ func testPKIHealthCheckCommand(tb testing.TB) (*cli.MockUi, *PKIHealthCheckComma } func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[string][]map[string]interface{}) { + t.Helper() + stdout := bytes.NewBuffer(nil) stderr := bytes.NewBuffer(nil) runOpts := &RunOptions{ @@ -295,6 +297,8 @@ func execPKIHC(t *testing.T, client *api.Client, ok bool) (int, string, map[stri } func validateExpectedPKIHC(t *testing.T, expected, results map[string][]map[string]interface{}) { + t.Helper() + for test, subtest := range expected { actual, ok := results[test] require.True(t, ok, fmt.Sprintf("expected top-level test %v to be present", test)) @@ -615,7 +619,7 @@ var expectedNoPerm = map[string][]map[string]interface{}{ }, "root_issued_leaves": { { - "status": "ok", + "status": "insufficient_permissions", }, }, "tidy_last_run": { @@ -625,7 +629,7 @@ var expectedNoPerm = map[string][]map[string]interface{}{ }, "too_many_certs": { { - "status": "ok", + "status": "insufficient_permissions", }, }, } diff --git a/command/pki_issue_intermediate.go b/command/pki_issue_intermediate.go index 90bb04fc95be..7fd881aac1bb 100644 --- a/command/pki_issue_intermediate.go +++ b/command/pki_issue_intermediate.go @@ -89,6 +89,14 @@ func (c *PKIIssueCACommand) Run(args []string) int { return 1 } + parentMountIssuer := sanitizePath(args[0]) // /pki/issuer/default + + intermediateMount := sanitizePath(args[1]) + + return pkiIssue(c.BaseCommand, parentMountIssuer, intermediateMount, c.flagNewIssuerName, c.flagKeyStorageSource, data) +} + +func pkiIssue(c *BaseCommand, parentMountIssuer string, intermediateMount string, flagNewIssuerName string, flagKeyStorageSource string, data map[string]interface{}) int { // Check We Have a Client client, err := c.Client() if err != nil { @@ -97,27 +105,26 @@ func (c *PKIIssueCACommand) Run(args []string) int { } // Sanity Check the Parent Issuer - parentMountIssuer := sanitizePath(args[0]) // /pki/issuer/default - _, parentIssuerName := paths.Split(parentMountIssuer) if !strings.Contains(parentMountIssuer, "/issuer/") { c.UI.Error(fmt.Sprintf("Parent Issuer %v is Not a PKI Issuer Path of the format /mount/issuer/issuer-ref", parentMountIssuer)) + return 1 } - _, err = client.Logical().Read(parentMountIssuer + "/json") + _, err = readIssuer(client, parentMountIssuer) if err != nil { c.UI.Error(fmt.Sprintf("Unable to access parent issuer %v: %v", parentMountIssuer, err)) + return 1 } // Set-up Failure State (Immediately Before First Write Call) - intermediateMount := sanitizePath(args[1]) failureState := inCaseOfFailure{ intermediateMount: intermediateMount, parentMount: strings.Split(parentMountIssuer, "/issuer/")[0], parentIssuer: parentMountIssuer, - newName: c.flagNewIssuerName, + newName: flagNewIssuerName, } // Generate Certificate Signing Request - csrResp, err := client.Logical().Write(intermediateMount+"/intermediate/generate/"+c.flagKeyStorageSource, data) + csrResp, err := client.Logical().Write(intermediateMount+"/intermediate/generate/"+flagKeyStorageSource, data) if err != nil { if strings.Contains(err.Error(), "no handler for route") { // Mount Given Does Not Exist 
c.UI.Error(fmt.Sprintf("Given Intermediate Mount %v Does Not Exist: %v", intermediateMount, err)) @@ -129,21 +136,21 @@ func (c *PKIIssueCACommand) Run(args []string) int { return 1 } // Parse CSR Response, Also Verifies that this is a PKI Mount - // (eg. calling the above call on cubbyhole/ won't return an error response) + // (e.g. calling the above call on cubbyhole/ won't return an error response) csrPemRaw, present := csrResp.Data["csr"] if !present { c.UI.Error(fmt.Sprintf("Failed to Generate Intermediate CSR on %v, got response: %v", intermediateMount, csrResp)) return 1 } keyIdRaw, present := csrResp.Data["key_id"] - if !present && c.flagKeyStorageSource == "internal" { + if !present && flagKeyStorageSource == "internal" { c.UI.Error(fmt.Sprintf("Failed to Generate Key on %v, got response: %v", intermediateMount, csrResp)) return 1 } // If that all Parses, then we've successfully generated a CSR! Save It (and the Key-ID) failureState.csrGenerated = true - if c.flagKeyStorageSource == "internal" { + if flagKeyStorageSource == "internal" { failureState.createdKeyId = keyIdRaw.(string) } csr := csrPemRaw.(string) @@ -171,7 +178,7 @@ func (c *PKIIssueCACommand) Run(args []string) int { // Next Import Certificate certificate := rootResp.Data["certificate"].(string) - issuerId, err := importIssuerWithName(client, intermediateMount, certificate, c.flagNewIssuerName) + issuerId, err := importIssuerWithName(client, intermediateMount, certificate, flagNewIssuerName) failureState.certIssuerId = issuerId if err != nil { if strings.Contains(err.Error(), "error naming issuer") { @@ -189,6 +196,7 @@ func (c *PKIIssueCACommand) Run(args []string) int { // Then Import Issuing Certificate issuingCa := rootResp.Data["issuing_ca"].(string) + _, parentIssuerName := paths.Split(parentMountIssuer) _, err = importIssuerWithName(client, intermediateMount, issuingCa, parentIssuerName) if err != nil { if strings.Contains(err.Error(), "error naming issuer") { @@ -215,16 +223,17 @@ func (c *PKIIssueCACommand) Run(args []string) int { failureState.caChainImported = true // Finally we read our newly issued certificate in order to tell our caller about it - c.readAndOutputNewCertificate(client, intermediateMount, issuerId) + readAndOutputNewCertificate(client, intermediateMount, issuerId, c) return 0 } -func (c *PKIIssueCACommand) readAndOutputNewCertificate(client *api.Client, intermediateMount string, issuerId string) { +func readAndOutputNewCertificate(client *api.Client, intermediateMount string, issuerId string, c *BaseCommand) { resp, err := client.Logical().Read(sanitizePath(intermediateMount + "/issuer/" + issuerId)) if err != nil || resp == nil { c.UI.Error(fmt.Sprintf("Error Reading Fully Imported Certificate from %v : %v", intermediateMount+"/issuer/"+issuerId, err)) + return } OutputSecret(c.UI, resp) diff --git a/command/pki_list_intermediate_command.go b/command/pki_list_intermediate.go similarity index 97% rename from command/pki_list_intermediate_command.go rename to command/pki_list_intermediate.go index a54c5ef77952..2de5a8e8ab76 100644 --- a/command/pki_list_intermediate_command.go +++ b/command/pki_list_intermediate.go @@ -186,10 +186,16 @@ func (c *PKIListIntermediateCommand) Run(args []string) int { "signature_match": c.flagSignatureMatch, } + issuerResp, err := readIssuer(client, issuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to read parent issuer on path %s: %s", issuer, err.Error())) + return 1 + } + for _, child := range issued { path := sanitizePath(child) if path != "" { - 
err, verifyResults := verifySignBetween(client, issuer, path) + verifyResults, err := verifySignBetween(client, issuerResp, path) if err != nil { c.UI.Error(fmt.Sprintf("Failed to run verification on path %v: %v", path, err)) return 1 diff --git a/command/pki_reissue_intermediate.go b/command/pki_reissue_intermediate.go new file mode 100644 index 000000000000..4c6659cf3770 --- /dev/null +++ b/command/pki_reissue_intermediate.go @@ -0,0 +1,294 @@ +package command + +import ( + "crypto/ecdsa" + "crypto/elliptic" + "crypto/rsa" + "crypto/x509" + "encoding/hex" + "fmt" + "io" + "net" + "net/url" + "os" + "strings" + + "github.com/posener/complete" +) + +type PKIReIssueCACommand struct { + *BaseCommand + + flagConfig string + flagReturnIndicator string + flagDefaultDisabled bool + flagList bool + + flagKeyStorageSource string + flagNewIssuerName string +} + +func (c *PKIReIssueCACommand) Synopsis() string { + return "Uses a parent certificate and a template certificate to create a new issuer on a child mount" +} + +func (c *PKIReIssueCACommand) Help() string { + helpText := ` +Usage: vault pki reissue PARENT TEMPLATE CHILD_MOUNT options +` + return strings.TrimSpace(helpText) +} + +func (c *PKIReIssueCACommand) Flags() *FlagSets { + set := c.flagSet(FlagSetHTTP | FlagSetOutputFormat) + f := set.NewFlagSet("Command Options") + + f.StringVar(&StringVar{ + Name: "type", + Target: &c.flagKeyStorageSource, + Default: "internal", + EnvVar: "", + Usage: `Options are “existing” - to use an existing key inside vault, “internal” - to generate a new key inside vault, or “kms” - to link to an external key. Exported keys are not available through this API.`, + Completion: complete.PredictSet("internal", "existing", "kms"), + }) + + f.StringVar(&StringVar{ + Name: "issuer_name", + Target: &c.flagNewIssuerName, + Default: "", + EnvVar: "", + Usage: `If present, the newly created issuer will be given this name`, + }) + + return set +} + +func (c *PKIReIssueCACommand) Run(args []string) int { + // Parse Args + f := c.Flags() + if err := f.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + args = f.Args() + + if len(args) < 3 { + c.UI.Error("Not enough arguments: expected parent issuer, template issuer, and child mount location") + return 1 + } + + stdin := (io.Reader)(os.Stdin) + userData, err := parseArgsData(stdin, args[3:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse K=V data: %s", err)) + return 1 + } + + // Check We Have a Client + client, err := c.Client() + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to obtain client: %v", err)) + return 1 + } + + parentIssuer := sanitizePath(args[0]) // /pki/issuer/default + templateIssuer := sanitizePath(args[1]) + intermediateMount := sanitizePath(args[2]) + + templateIssuerBundle, err := readIssuer(client, templateIssuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Error fetching template certificate %v : %v", templateIssuer, err)) + return 1 + } + certificate := templateIssuerBundle.certificate + + useExistingKey := c.flagKeyStorageSource == "existing" + keyRef := "" + if useExistingKey { + keyRef = templateIssuerBundle.keyId + + if keyRef == "" { + c.UI.Error(fmt.Sprintf("Template issuer %s did not have a key id field set in response which is required", templateIssuer)) + return 1 + } + } + + templateData, err := parseTemplateCertificate(*certificate, useExistingKey, keyRef) + if err != nil { + c.UI.Error(fmt.Sprintf("Error parsing template certificate %v : %v", templateIssuer, err)) + return 1 + } + data := updateTemplateWithData(templateData, userData) + + return pkiIssue(c.BaseCommand, parentIssuer, intermediateMount, 
c.flagNewIssuerName, c.flagKeyStorageSource, data) +} + +func updateTemplateWithData(template map[string]interface{}, changes map[string]interface{}) map[string]interface{} { + data := map[string]interface{}{} + + for key, value := range template { + data[key] = value + } + + // ttl and not_after set the same thing. Delete template ttl if using not_after: + if _, ok := changes["not_after"]; ok { + delete(data, "ttl") + } + + // If we are updating the key_type, do not set key_bits + if _, ok := changes["key_type"]; ok && changes["key_type"] != template["key_type"] { + delete(data, "key_bits") + } + + for key, value := range changes { + data[key] = value + } + + return data +} + +func parseTemplateCertificate(certificate x509.Certificate, useExistingKey bool, keyRef string) (templateData map[string]interface{}, err error) { + // Generate Certificate Signing Parameters + templateData = map[string]interface{}{ + "common_name": certificate.Subject.CommonName, + "alt_names": makeAltNamesCommaSeparatedString(certificate.DNSNames, certificate.EmailAddresses), + "ip_sans": makeIpAddressCommaSeparatedString(certificate.IPAddresses), + "uri_sans": makeUriCommaSeparatedString(certificate.URIs), + // other_sans (string: "") - Specifies custom OID/UTF8-string SANs. These must match values specified on the role in allowed_other_sans (see role creation for allowed_other_sans globbing rules). The format is the same as OpenSSL: <oid>;<type>:<value> where the only current valid type is UTF8. This can be a comma-delimited list or a JSON string slice. + // Punting on Other_SANs, shouldn't really be on CAs + "signature_bits": findSignatureBits(certificate.SignatureAlgorithm), + "exclude_cn_from_sans": determineExcludeCnFromSans(certificate), + "ou": certificate.Subject.OrganizationalUnit, + "organization": certificate.Subject.Organization, + "country": certificate.Subject.Country, + "locality": certificate.Subject.Locality, + "province": certificate.Subject.Province, + "street_address": certificate.Subject.StreetAddress, + "postal_code": certificate.Subject.PostalCode, + "serial_number": certificate.Subject.SerialNumber, + "ttl": (certificate.NotAfter.Sub(certificate.NotBefore)).String(), + "max_path_length": certificate.MaxPathLen, + "permitted_dns_domains": strings.Join(certificate.PermittedDNSDomains, ","), + "use_pss": isPSS(certificate.SignatureAlgorithm), + } + + if useExistingKey { + templateData["skid"] = hex.EncodeToString(certificate.SubjectKeyId) // TODO: Double Check this with someone + if keyRef == "" { + return nil, fmt.Errorf("unable to create certificate template for existing key without a key_id") + } + templateData["key_ref"] = keyRef + } else { + templateData["key_type"] = getKeyType(certificate.PublicKeyAlgorithm.String()) + templateData["key_bits"] = findBitLength(certificate.PublicKey) + } + + return templateData, nil +} + +func isPSS(algorithm x509.SignatureAlgorithm) bool { + switch algorithm { + case x509.SHA384WithRSAPSS, x509.SHA512WithRSAPSS, x509.SHA256WithRSAPSS: + return true + default: + return false + } +} + +func makeAltNamesCommaSeparatedString(names []string, emails []string) string { + return strings.Join(names, ",") + "," + strings.Join(emails, ",") +} + +func makeUriCommaSeparatedString(uris []*url.URL) string { + stringAddresses := make([]string, len(uris)) + for i, uri := range uris { + stringAddresses[i] = uri.String() + } + return strings.Join(stringAddresses, ",") +} + +func makeIpAddressCommaSeparatedString(addresses []net.IP) string { + stringAddresses := make([]string, 
len(addresses)) + for i, address := range addresses { + stringAddresses[i] = address.String() + } + return strings.Join(stringAddresses, ",") +} + +func determineExcludeCnFromSans(certificate x509.Certificate) bool { + cn := certificate.Subject.CommonName + if cn == "" { + return false + } + + emails := certificate.EmailAddresses + for _, email := range emails { + if email == cn { + return false + } + } + + dnses := certificate.DNSNames + for _, dns := range dnses { + if dns == cn { + return false + } + } + + return true +} + +func findBitLength(publicKey any) int { + if publicKey == nil { + return 0 + } + switch pub := publicKey.(type) { + case *rsa.PublicKey: + return pub.N.BitLen() + case *ecdsa.PublicKey: + switch pub.Curve { + case elliptic.P224(): + return 224 + case elliptic.P256(): + return 256 + case elliptic.P384(): + return 384 + case elliptic.P521(): + return 521 + default: + return 0 + } + default: + return 0 + } +} + +func findSignatureBits(algo x509.SignatureAlgorithm) int { + switch algo { + case x509.MD2WithRSA, x509.MD5WithRSA, x509.SHA1WithRSA, x509.DSAWithSHA1, x509.ECDSAWithSHA1: + return -1 + case x509.SHA256WithRSA, x509.DSAWithSHA256, x509.ECDSAWithSHA256, x509.SHA256WithRSAPSS: + return 256 + case x509.SHA384WithRSA, x509.ECDSAWithSHA384, x509.SHA384WithRSAPSS: + return 384 + case x509.SHA512WithRSA, x509.SHA512WithRSAPSS, x509.ECDSAWithSHA512: + return 512 + case x509.PureEd25519: + return 0 + default: + return -1 + } +} + +func getKeyType(goKeyType string) string { + switch goKeyType { + case "RSA": + return "rsa" + case "ECDSA": + return "ec" + case "Ed25519": + return "ed25519" + default: + return "" + } +} diff --git a/command/pki_reissue_intermediate_test.go b/command/pki_reissue_intermediate_test.go new file mode 100644 index 000000000000..928449344cb6 --- /dev/null +++ b/command/pki_reissue_intermediate_test.go @@ -0,0 +1,195 @@ +package command + +import ( + "bytes" + "testing" + + "github.com/hashicorp/vault/api" +) + +// TestPKIReIssueIntermediate tests that the pki reissue command line tool accurately copies information from the +// template certificate to the newly issued certificate, by issuing and reissuing several certificates and seeing how +// they related to each other. 
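The helpers above derive a request template from an existing issuer's certificate and merge user overrides via updateTemplateWithData: template values are copied first, ttl is dropped when not_after is supplied, key_bits is dropped when key_type changes, then user changes win. A minimal standalone sketch mirroring those merge rules (logic copied from the function above, with illustrative values):

```go
package main

import "fmt"

// mergeTemplate mirrors updateTemplateWithData above: template values are
// copied first, conflicting derived fields are dropped, then user changes win.
func mergeTemplate(template, changes map[string]interface{}) map[string]interface{} {
	data := map[string]interface{}{}
	for k, v := range template {
		data[k] = v
	}
	// ttl and not_after set the same thing; prefer the explicit not_after.
	if _, ok := changes["not_after"]; ok {
		delete(data, "ttl")
	}
	// A new key_type invalidates the template's key_bits.
	if kt, ok := changes["key_type"]; ok && kt != template["key_type"] {
		delete(data, "key_bits")
	}
	for k, v := range changes {
		data[k] = v
	}
	return data
}

func main() {
	template := map[string]interface{}{"common_name": "Int X1", "key_type": "rsa", "key_bits": 2048, "ttl": "87600h"}
	changes := map[string]interface{}{"key_type": "ec", "common_name": "Int X2"}
	// key_bits is dropped; key_type and common_name are overridden.
	fmt.Println(mergeTemplate(template, changes))
}
```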
+func TestPKIReIssueIntermediate(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + createComplicatedIssuerSetUpWithReIssueIntermediate(t, client) + + runPkiVerifySignTests(t, client) + + runPkiListIntermediateTests(t, client) +} + +func createComplicatedIssuerSetUpWithReIssueIntermediate(t *testing.T, client *api.Client) { + // Relationship Map to Create + // pki-root | pki-newroot | pki-empty + // RootX1 RootX2 RootX4 RootX3 + // | | + // ---------------------------------------------- + // v v + // IntX1 IntX2 pki-int + // | | + // v v + // IntX3 (-----------------------) IntX3 + // + // Here X1,X2 have the same name (same mount) + // RootX4 uses the same key as RootX1 (but a different common_name/subject) + // RootX3 has the same name, and is on a different mount + // RootX1 has issued IntX1; RootX3 has issued IntX2 + + if err := client.Sys().Mount("pki-root", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-newroot", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + if err := client.Sys().Mount("pki-int", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + MaxLeaseTTL: "36500d", + }, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + // Used to check handling empty list responses: Not Used for Any Issuers / Certificates + if err := client.Sys().Mount("pki-empty", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{}, + }); err != nil { + t.Fatalf("pki mount error: %#v", err) + } + + resp, err := client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX1", + "key_name": "rootX1", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + resp, err = client.Logical().Write("pki-root/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX2", + }) + if err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-newroot/root/generate/internal", map[string]interface{}{ + "key_type": "ec", + "common_name": "Root X", + "ttl": "3650d", + "issuer_name": "rootX3", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + if resp, err := client.Logical().Write("pki-root/root/generate/existing", map[string]interface{}{ + "common_name": "Root X4", + "ttl": "3650d", + "issuer_name": "rootX4", + "key_ref": "rootX1", + }); err != nil || resp == nil { + t.Fatalf("failed to prime CA: %v", err) + } + + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + // Intermediate X1 
+ intX1CallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX1", + "pki-root/issuer/rootX1", + "pki-int/", + "key_type=rsa", + "common_name=Int X1", + "ou=thing", + "ttl=3650d", + } + codeOut := RunCustom(intX1CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X1, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X2 - using ReIssue + intX2CallArgs := []string{ + "pki", "reissue", "-format=json", "-issuer_name=intX2", + "pki-newroot/issuer/rootX3", + "pki-int/issuer/intX1", + "pki-int/", + "key_type=ec", + "common_name=Int X2", + } + codeOut = RunCustom(intX2CallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X2, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + // Intermediate X3 + intX3OriginalCallArgs := []string{ + "pki", "issue", "-format=json", "-issuer_name=intX3", + "pki-int/issuer/intX1", + "pki-int/", + "key_type=ec", + "use_pss=true", // This is meaningful because rootX1 is an RSA key + "signature_bits=512", + "common_name=Int X3", + "ttl=3650d", + } + codeOut = RunCustom(intX3OriginalCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } + + intX3AdaptedCallArgs := []string{ + "pki", "reissue", "-format=json", "-issuer_name=intX3also", "-type=existing", + "pki-int/issuer/intX2", // This is an EC key + "pki-int/issuer/intX3", // This template includes use_pss = true which can't be accommodated + "pki-int/", + } + codeOut = RunCustom(intX3AdaptedCallArgs, runOpts) + if codeOut != 0 { + t.Fatalf("error issuing intermediate X3also, code: %d \n stdout: %v \n stderr: %v", codeOut, stdout, stderr) + } +} diff --git a/command/pki_verify_sign_command.go b/command/pki_verify_sign.go similarity index 60% rename from command/pki_verify_sign_command.go rename to command/pki_verify_sign.go index fc38583a3f95..cee6ae00d1a0 100644 --- a/command/pki_verify_sign_command.go +++ b/command/pki_verify_sign.go @@ -8,9 +8,10 @@ import ( "strconv" "strings" + "github.com/hashicorp/vault/command/healthcheck" + "github.com/ghodss/yaml" "github.com/hashicorp/vault/api" - "github.com/hashicorp/vault/sdk/helper/certutil" "github.com/ryanuber/columnize" ) @@ -93,7 +94,13 @@ func (c *PKIVerifySignCommand) Run(args []string) int { return 1 } - err, results := verifySignBetween(client, issuer, issued) + issuerResp, err := readIssuer(client, issuer) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to read issuer: %s: %s", issuer, err.Error())) + return 1 + } + + results, err := verifySignBetween(client, issuerResp, issued) if err != nil { c.UI.Error(fmt.Sprintf("Failed to run verification: %v", err)) return pkiRetUsage @@ -104,60 +111,34 @@ func (c *PKIVerifySignCommand) Run(args []string) int { return 0 } -func verifySignBetween(client *api.Client, issuerPath string, issuedPath string) (error, map[string]bool) { +func verifySignBetween(client *api.Client, issuerResp *issuerResponse, issuedPath string) (map[string]bool, error) { // Note that this eats warnings - // Fetch and Parse the Potential Issuer: - issuerResp, err := client.Logical().Read(issuerPath) - if err != nil { - return fmt.Errorf("error: unable to fetch issuer %v: %w", issuerPath, err), nil - } - issuerCertPem := issuerResp.Data["certificate"].(string) - issuerCertBundle, err := certutil.ParsePEMBundle(issuerCertPem) - if err != nil { - return err, nil - } - issuerKeyId := 
issuerCertBundle.Certificate.SubjectKeyId + issuerCert := issuerResp.certificate + issuerKeyId := issuerCert.SubjectKeyId // Fetch and Parse the Potential Issued Cert - issuedCertResp, err := client.Logical().Read(issuedPath) - if err != nil { - return fmt.Errorf("error: unable to fetch issuer %v: %w", issuerPath, err), nil - } - if len(issuedPath) <= 2 { - return fmt.Errorf("%v", issuedPath), nil - } - caChainRaw := issuedCertResp.Data["ca_chain"] - if caChainRaw == nil { - return fmt.Errorf("no ca_chain information on %v", issuedPath), nil - } - caChainCast := caChainRaw.([]interface{}) - caChain := make([]string, len(caChainCast)) - for i, cert := range caChainCast { - caChain[i] = cert.(string) - } - issuedCertPem := issuedCertResp.Data["certificate"].(string) - issuedCertBundle, err := certutil.ParsePEMBundle(issuedCertPem) + issuedCertBundle, err := readIssuer(client, issuedPath) if err != nil { - return err, nil + return nil, fmt.Errorf("error: unable to fetch issuer %v: %w", issuedPath, err) } - parentKeyId := issuedCertBundle.Certificate.AuthorityKeyId + parentKeyId := issuedCertBundle.certificate.AuthorityKeyId // Check the Chain-Match rootCertPool := x509.NewCertPool() - rootCertPool.AddCert(issuerCertBundle.Certificate) + rootCertPool.AddCert(issuerCert) checkTrustPathOptions := x509.VerifyOptions{ Roots: rootCertPool, } trust := false - trusts, err := issuedCertBundle.Certificate.Verify(checkTrustPathOptions) + trusts, err := issuedCertBundle.certificate.Verify(checkTrustPathOptions) if err != nil && !strings.Contains(err.Error(), "certificate signed by unknown authority") { - return err, nil + return nil, err } else if err == nil { for _, chain := range trusts { // Output of this Should Only Have One Trust with Chain of Length Two (Child followed by Parent) for _, cert := range chain { - if issuedCertBundle.Certificate.Equal(cert) { + if issuedCertBundle.certificate.Equal(cert) { trust = true break } @@ -166,29 +147,113 @@ func verifySignBetween(client *api.Client, issuerPath string, issuedPath string) } pathMatch := false - for _, cert := range caChain { - if strings.TrimSpace(cert) == strings.TrimSpace(issuerCertPem) { // TODO: Decode into ASN1 to Check + for _, cert := range issuedCertBundle.caChain { + if bytes.Equal(cert.Raw, issuerCert.Raw) { pathMatch = true break } } signatureMatch := false - err = issuedCertBundle.Certificate.CheckSignatureFrom(issuerCertBundle.Certificate) + err = issuedCertBundle.certificate.CheckSignatureFrom(issuerCert) if err == nil { signatureMatch = true } result := map[string]bool{ // This comparison isn't strictly correct, despite a standard ordering these are sets - "subject_match": bytes.Equal(issuerCertBundle.Certificate.RawSubject, issuedCertBundle.Certificate.RawIssuer), + "subject_match": bytes.Equal(issuerCert.RawSubject, issuedCertBundle.certificate.RawIssuer), "path_match": pathMatch, "trust_match": trust, // TODO: Refactor into a reasonable function "key_id_match": bytes.Equal(parentKeyId, issuerKeyId), "signature_match": signatureMatch, } - return nil, result + return result, nil +} + +type issuerResponse struct { + keyId string + certificate *x509.Certificate + caChain []*x509.Certificate +} + +func readIssuer(client *api.Client, issuerPath string) (*issuerResponse, error) { + issuerResp, err := client.Logical().Read(issuerPath) + if err != nil { + return nil, err + } + issuerCertPem, err := requireStrRespField(issuerResp, "certificate") + if err != nil { + return nil, err + } + issuerCert, err := 
healthcheck.ParsePEMCert(issuerCertPem) + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's certificate: %w", issuerPath, err) + } + + caChainPem, err := requireStrListRespField(issuerResp, "ca_chain") + if err != nil { + return nil, fmt.Errorf("unable to parse issuer %v's CA chain: %w", issuerPath, err) + } + + var caChain []*x509.Certificate + for _, pem := range caChainPem { + trimmedPem := strings.TrimSpace(pem) + if trimmedPem == "" { + continue + } + cert, err := healthcheck.ParsePEMCert(trimmedPem) + if err != nil { + return nil, err + } + caChain = append(caChain, cert) + } + + keyId := optStrRespField(issuerResp, "key_id") + + return &issuerResponse{ + keyId: keyId, + certificate: issuerCert, + caChain: caChain, + }, nil +} + +func optStrRespField(resp *api.Secret, reqField string) string { + if resp == nil || resp.Data == nil { + return "" + } + if val, present := resp.Data[reqField]; !present { + return "" + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "" + } else { + return strVal + } +} + +func requireStrRespField(resp *api.Secret, reqField string) (string, error) { + if resp == nil || resp.Data == nil { + return "", fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return "", fmt.Errorf("response did not contain field: %s", reqField) + } else if strVal, castOk := val.(string); !castOk || strVal == "" { + return "", fmt.Errorf("field %s value was blank or not a string: %v", reqField, val) + } else { + return strVal, nil + } +} + +func requireStrListRespField(resp *api.Secret, reqField string) ([]string, error) { + if resp == nil || resp.Data == nil { + return nil, fmt.Errorf("nil response received, %s field unavailable", reqField) + } + if val, present := resp.Data[reqField]; !present { + return nil, fmt.Errorf("response did not contain field: %s", reqField) + } else { + return healthcheck.StringList(val) + } } func (c *PKIVerifySignCommand) outputResults(results map[string]bool, potentialParent, potentialChild string) error { diff --git a/command/server.go b/command/server.go index dd5526eb7299..a15f1b7122db 100644 --- a/command/server.go +++ b/command/server.go @@ -1721,7 +1721,6 @@ func (c *ServerCommand) configureLogging(config *server.Config) (hclog.Intercept } logCfg := &loghelper.LogConfig{ - Name: "vault", LogLevel: logLevel, LogFormat: logFormat, LogFilePath: config.LogFile, diff --git a/command/server/config.go b/command/server/config.go index d2d30b40ed3e..a9637f22477f 100644 --- a/command/server/config.go +++ b/command/server/config.go @@ -5,7 +5,6 @@ import ( "errors" "fmt" "io" - "io/ioutil" "math" "os" "path/filepath" @@ -194,7 +193,10 @@ func DevTLSConfig(storageType, certDir string) (*Config, error) { if err := os.WriteFile(fmt.Sprintf("%s/%s", certDir, VaultDevKeyFilename), []byte(key), 0o400); err != nil { return nil, err } + return parseDevTLSConfig(storageType, certDir) +} +func parseDevTLSConfig(storageType, certDir string) (*Config, error) { hclStr := ` disable_mlock = true @@ -217,8 +219,8 @@ storage "%s" { ui = true ` - - hclStr = fmt.Sprintf(hclStr, certDir, certDir, storageType) + certDirEscaped := strings.Replace(certDir, "\\", "\\\\", -1) + hclStr = fmt.Sprintf(hclStr, certDirEscaped, certDirEscaped, storageType) parsed, err := ParseConfig(hclStr, "") if err != nil { return nil, err @@ -465,9 +467,14 @@ func LoadConfig(path string) (*Config, error) { return nil, errors.New("Error parsing the environment variable 
VAULT_ENABLE_FILE_PERMISSIONS_CHECK") } } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() if enableFilePermissionsCheck { - err = osutil.OwnerPermissionsMatch(path, 0, 0) + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) if err != nil { return nil, err } @@ -496,8 +503,14 @@ func CheckConfig(c *Config, e error) (*Config, error) { // LoadConfigFile loads the configuration from the given file. func LoadConfigFile(path string) (*Config, error) { + // Open the file + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() // Read the file - d, err := ioutil.ReadFile(path) + d, err := io.ReadAll(f) if err != nil { return nil, err } @@ -518,7 +531,7 @@ func LoadConfigFile(path string) (*Config, error) { if enableFilePermissionsCheck { // check permissions of the config file - err = osutil.OwnerPermissionsMatch(path, 0, 0) + err = osutil.OwnerPermissionsMatchFile(f, 0, 0) if err != nil { return nil, err } @@ -1120,23 +1133,39 @@ func (c *Config) Sanitized() map[string]interface{} { // Sanitize storage stanza if c.Storage != nil { + storageType := c.Storage.Type sanitizedStorage := map[string]interface{}{ - "type": c.Storage.Type, + "type": storageType, "redirect_addr": c.Storage.RedirectAddr, "cluster_addr": c.Storage.ClusterAddr, "disable_clustering": c.Storage.DisableClustering, } + + if storageType == "raft" { + sanitizedStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.Storage.Config["max_entry_size"], + } + } + result["storage"] = sanitizedStorage } // Sanitize HA storage stanza if c.HAStorage != nil { + haStorageType := c.HAStorage.Type sanitizedHAStorage := map[string]interface{}{ - "type": c.HAStorage.Type, + "type": haStorageType, "redirect_addr": c.HAStorage.RedirectAddr, "cluster_addr": c.HAStorage.ClusterAddr, "disable_clustering": c.HAStorage.DisableClustering, } + + if haStorageType == "raft" { + sanitizedHAStorage["raft"] = map[string]interface{}{ + "max_entry_size": c.HAStorage.Config["max_entry_size"], + } + } + result["ha_storage"] = sanitizedHAStorage } diff --git a/command/server/config_test.go b/command/server/config_test.go index 5b3aeb54b21c..ed40f2667640 100644 --- a/command/server/config_test.go +++ b/command/server/config_test.go @@ -5,6 +5,8 @@ import ( "reflect" "strings" "testing" + + "github.com/stretchr/testify/require" ) func TestLoadConfigFile(t *testing.T) { @@ -183,3 +185,29 @@ func TestMerge(t *testing.T) { }) } } + +// Test_parseDevTLSConfig verifies that both Windows and Unix directories are correctly escaped when creating a dev TLS +// configuration in HCL +func Test_parseDevTLSConfig(t *testing.T) { + tests := []struct { + name string + certDirectory string + }{ + { + name: "windows path", + certDirectory: `C:\Users\ADMINI~1\AppData\Local\Temp\2\vault-tls4169358130`, + }, + { + name: "unix path", + certDirectory: "/tmp/vault-tls4169358130", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + cfg, err := parseDevTLSConfig("file", tt.certDirectory) + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevCertFilename), cfg.Listeners[0].TLSCertFile) + require.Equal(t, fmt.Sprintf("%s/%s", tt.certDirectory, VaultDevKeyFilename), cfg.Listeners[0].TLSKeyFile) + }) + } +} diff --git a/command/server/hcp_link_config_test.go b/command/server/hcp_link_config_test.go index f71c96d76cc5..51f5a5ec8800 100644 --- a/command/server/hcp_link_config_test.go +++ b/command/server/hcp_link_config_test.go @@ -9,6 +9,10 @@ import ( ) func 
TestHCPLinkConfig(t *testing.T) { + t.Setenv("HCP_CLIENT_ID", "") + t.Setenv("HCP_CLIENT_SECRET", "") + t.Setenv("HCP_RESOURCE_ID", "") + config, err := LoadConfigFile("./test-fixtures/hcp_link_config.hcl") if err != nil { t.Fatalf("err: %s", err) diff --git a/command/server_test.go b/command/server_test.go index 4ffdd17a62fc..e114cc105a54 100644 --- a/command/server_test.go +++ b/command/server_test.go @@ -21,6 +21,7 @@ import ( "github.com/hashicorp/vault/sdk/physical" physInmem "github.com/hashicorp/vault/sdk/physical/inmem" "github.com/mitchellh/cli" + "github.com/stretchr/testify/require" ) func init() { @@ -314,3 +315,13 @@ func TestServer(t *testing.T) { }) } } + +// TestServer_DevTLS verifies that a vault server starts up correctly with the -dev-tls flag +func TestServer_DevTLS(t *testing.T) { + ui, cmd := testServerCommand(t) + args := []string{"-dev-tls", "-dev-listen-address=127.0.0.1:0", "-test-server-config"} + retCode := cmd.Run(args) + output := ui.ErrorWriter.String() + ui.OutputWriter.String() + require.Equal(t, 0, retCode, output) + require.Contains(t, output, `tls: "enabled"`) +} diff --git a/command/transit.go b/command/transit.go new file mode 100644 index 000000000000..9b4b3050161f --- /dev/null +++ b/command/transit.go @@ -0,0 +1,39 @@ +package command + +import ( + "strings" + + "github.com/mitchellh/cli" +) + +var _ cli.Command = (*TransitCommand)(nil) + +type TransitCommand struct { + *BaseCommand +} + +func (c *TransitCommand) Synopsis() string { + return "Interact with Vault's Transit Secrets Engine" +} + +func (c *TransitCommand) Help() string { + helpText := ` +Usage: vault transit [options] [args] + + This command has subcommands for interacting with Vault's Transit Secrets + Engine. Here are some simple examples, and more detailed examples are + available in the subcommands or the documentation. + + To import a key into the specified Transit or Transform mount: + + $ vault transit import transit/keys/newly-imported @path/to/key type=rsa-2048 + + Please see the individual subcommand help for detailed usage information. +` + + return strings.TrimSpace(helpText) +} + +func (c *TransitCommand) Run(args []string) int { + return cli.RunResultHelp +} diff --git a/command/transit_import_key.go b/command/transit_import_key.go index 04b42c0b7097..56e72f835f2b 100644 --- a/command/transit_import_key.go +++ b/command/transit_import_key.go @@ -8,6 +8,7 @@ import ( "encoding/base64" "encoding/pem" "fmt" + "os" "regexp" "strings" @@ -38,17 +39,21 @@ func (c *TransitImportCommand) Help() string { Usage: vault transit import PATH KEY [options...] Using the Transit or Transform key wrapping system, imports key material from - the base64 encoded KEY, into a new key whose API path is PATH. To import a new version - into an existing key, use import_version. The remaining options after KEY (key=value style) are passed - on to the transit/transform create key endpoint. If your system or device natively supports - the RSA AES key wrap mechanism, you should use it directly rather than this command. + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new version into an + existing key, use import_version. The remaining options after KEY (key=value + style) are passed on to the Transit or Transform create key endpoint. 
If your + system or device natively supports the RSA AES key wrap mechanism (such as + the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it directly + rather than this command. + ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *TransitImportCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + return c.flagSet(FlagSetHTTP) } func (c *TransitImportCommand) AutocompleteArgs() complete.Predictor { @@ -60,13 +65,20 @@ func (c *TransitImportCommand) AutocompleteFlags() complete.Flags { } func (c *TransitImportCommand) Run(args []string) int { - return importKey(c.BaseCommand, "import", args) + return importKey(c.BaseCommand, "import", c.Flags(), args) } // error codes: 1: user error, 2: internal computation error, 3: remote api call error -func importKey(c *BaseCommand, operation string, args []string) int { - if len(args) != 2 { - c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2, got %d)", len(args))) +func importKey(c *BaseCommand, operation string, flags *FlagSets, args []string) int { + // Parse and validate the arguments. + if err := flags.Parse(args); err != nil { + c.UI.Error(err.Error()) + return 1 + } + + args = flags.Args() + if len(args) < 2 { + c.UI.Error(fmt.Sprintf("Incorrect argument count (expected 2+, got %d). Wanted PATH to import into and KEY material.", len(args))) return 1 } @@ -89,7 +101,18 @@ func importKey(c *BaseCommand, operation string, args []string) int { path := parts[1] keyName := parts[2] - key, err := base64.StdEncoding.DecodeString(args[1]) + keyMaterial := args[1] + if keyMaterial[0] == '@' { + keyMaterialBytes, err := os.ReadFile(keyMaterial[1:]) + if err != nil { + c.UI.Error(fmt.Sprintf("error reading key material file: %v", err)) + return 1 + } + + keyMaterial = string(keyMaterialBytes) + } + + key, err := base64.StdEncoding.DecodeString(keyMaterial) if err != nil { c.UI.Error(fmt.Sprintf("error base64 decoding source key material: %v", err)) return 1 @@ -126,15 +149,19 @@ func importKey(c *BaseCommand, operation string, args []string) int { } combinedCiphertext := append(wrappedAESKey, wrappedTargetKey...) importCiphertext := base64.StdEncoding.EncodeToString(combinedCiphertext) + // Parse all the key options - data := map[string]interface{}{ - "ciphertext": importCiphertext, + data, err := parseArgsData(os.Stdin, args[2:]) + if err != nil { + c.UI.Error(fmt.Sprintf("Failed to parse extra K=V data: %s", err)) + return 1 } - for _, v := range args[2:] { - parts := strings.Split(v, "=") - data[parts[0]] = parts[1] + if data == nil { + data = make(map[string]interface{}, 1) } + data["ciphertext"] = importCiphertext + c.UI.Output("Submitting wrapped key to Vault transit.") // Finally, call import _, err = client.Logical().Write(path+"/keys/"+keyName+"/"+operation, data) diff --git a/command/transit_import_key_test.go b/command/transit_import_key_test.go new file mode 100644 index 000000000000..d13c032048ff --- /dev/null +++ b/command/transit_import_key_test.go @@ -0,0 +1,186 @@ +package command + +import ( + "bytes" + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "testing" + + "github.com/hashicorp/vault/api" + + "github.com/stretchr/testify/require" +) + +// Validate the `vault transit import` command works. 
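+// The cases below generate RSA and AES key material locally, wrap it, and +// import it into a transit mount, expecting failure when the key name already +// exists or the declared type does not match the supplied material.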
+func TestTransitImport(t *testing.T) { + t.Parallel() + + client, closer := testVaultServer(t) + defer closer() + + if err := client.Sys().Mount("transit", &api.MountInput{ + Type: "transit", + }); err != nil { + t.Fatalf("transit mount error: %#v", err) + } + + rsa1, rsa2, aes128, aes256 := generateKeys(t) + + type testCase struct { + variant string + path string + key []byte + args []string + shouldFail bool + } + tests := []testCase{ + { + "import", + "transit/keys/rsa1", + rsa1, + []string{"type=rsa-2048"}, + false, /* first import */ + }, + { + "import", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/rsa1", + rsa2, + []string{"type=rsa-2048"}, + false, /* new version */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-4096"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/rsa2", + rsa2, + []string{"type=rsa-2048"}, + false, /* new name */ + }, + { + "import", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* first import */ + }, + { + "import", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* already exists */ + }, + { + "import-version", + "transit/keys/aes1", + aes256, + []string{"type=aes256-gcm96"}, + true, /* new version, different type */ + }, + { + "import-version", + "transit/keys/aes1", + aes128, + []string{"type=aes128-gcm96"}, + false, /* new version */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes128-gcm96"}, + true, /* wrong type */ + }, + { + "import", + "transit/keys/aes2", + aes256, + []string{"type=aes256-gcm96"}, + false, /* new name */ + }, + } + + for index, tc := range tests { + t.Logf("Running test case %d: %v", index, tc) + execTransitImport(t, client, tc.variant, tc.path, tc.key, tc.args, tc.shouldFail) + } +} + +func execTransitImport(t *testing.T, client *api.Client, method string, path string, key []byte, data []string, expectFailure bool) { + t.Helper() + + keyBase64 := base64.StdEncoding.EncodeToString(key) + + var args []string + args = append(args, "transit") + args = append(args, method) + args = append(args, path) + args = append(args, keyBase64) + args = append(args, data...) 
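+ // args now mirrors a real "vault transit <method>" invocation: PATH, then the base64 KEY, then key=value options.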
+ + stdout := bytes.NewBuffer(nil) + stderr := bytes.NewBuffer(nil) + runOpts := &RunOptions{ + Stdout: stdout, + Stderr: stderr, + Client: client, + } + + code := RunCustom(args, runOpts) + combined := stdout.String() + stderr.String() + + if code != 0 { + if !expectFailure { + t.Fatalf("Got unexpected failure from test (ret %d): %v", code, combined) + } + } else { + if expectFailure { + t.Fatalf("Expected failure, got success from test (ret %d): %v", code, combined) + } + } +} + +func generateKeys(t *testing.T) (rsa1 []byte, rsa2 []byte, aes128 []byte, aes256 []byte) { + t.Helper() + + priv1, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv1, "failed generating RSA 1 key") + require.NoError(t, err, "failed generating RSA 1 key") + + rsa1, err = x509.MarshalPKCS8PrivateKey(priv1) + require.NotNil(t, rsa1, "failed marshaling RSA 1 key") + require.NoError(t, err, "failed marshaling RSA 1 key") + + priv2, err := rsa.GenerateKey(rand.Reader, 2048) + require.NotNil(t, priv2, "failed generating RSA 2 key") + require.NoError(t, err, "failed generating RSA 2 key") + + rsa2, err = x509.MarshalPKCS8PrivateKey(priv2) + require.NotNil(t, rsa2, "failed marshaling RSA 2 key") + require.NoError(t, err, "failed marshaling RSA 2 key") + + aes128 = make([]byte, 128/8) + _, err = rand.Read(aes128) + require.NoError(t, err, "failed generating AES 128 key") + + aes256 = make([]byte, 256/8) + _, err = rand.Read(aes256) + require.NoError(t, err, "failed generating AES 256 key") + + return +} diff --git a/command/transit_import_key_version.go b/command/transit_import_key_version.go index ee84a35efeea..7b38f7dc7689 100644 --- a/command/transit_import_key_version.go +++ b/command/transit_import_key_version.go @@ -22,21 +22,24 @@ func (c *TransitImportVersionCommand) Synopsis() string { func (c *TransitImportVersionCommand) Help() string { helpText := ` -Usage: vault transit import-version PATH KEY +Usage: vault transit import-version PATH KEY [...] Using the Transit or Transform key wrapping system, imports key material from - the base64 encoded KEY, into a new key whose API path is PATH. To import a new transit/transform key, - use import. The remaining options after KEY (key=value style) are passed on to the transit/transform create key - endpoint. - If your system or device natively supports the RSA AES key wrap mechanism, you should use it directly - rather than this command. + the base64 encoded KEY (either directly on the CLI or via @path notation), + into a new key whose API path is PATH. To import a new Transit or Transform + key, use the import command instead. The remaining options after KEY + (key=value style) are passed on to the Transit or Transform create key endpoint. + If your system or device natively supports the RSA AES key wrap mechanism + (such as the PKCS#11 mechanism CKM_RSA_AES_KEY_WRAP), you should use it + directly rather than this command. 
+ ` + c.Flags().Help() return strings.TrimSpace(helpText) } func (c *TransitImportVersionCommand) Flags() *FlagSets { - return c.flagSet(FlagSetHTTP | FlagSetOutputField | FlagSetOutputFormat) + return c.flagSet(FlagSetHTTP) } func (c *TransitImportVersionCommand) AutocompleteArgs() complete.Predictor { @@ -48,5 +51,5 @@ func (c *TransitImportVersionCommand) AutocompleteFlags() complete.Flags { } func (c *TransitImportVersionCommand) Run(args []string) int { - return importKey(c.BaseCommand, "import_version", args) + return importKey(c.BaseCommand, "import_version", c.Flags(), args) } diff --git a/enos/ci/service-user-iam/main.tf b/enos/ci/service-user-iam/main.tf index a42333932267..bea2d46a4309 100644 --- a/enos/ci/service-user-iam/main.tf +++ b/enos/ci/service-user-iam/main.tf @@ -28,6 +28,7 @@ resource "aws_iam_role" "role" { data "aws_iam_policy_document" "assume_role_policy_document" { provider = aws.us_east_1 + statement { effect = "Allow" actions = ["sts:AssumeRole"] @@ -43,11 +44,47 @@ resource "aws_iam_role_policy" "role_policy" { provider = aws.us_east_1 role = aws_iam_role.role.name name = "${local.service_user}_policy" - policy = data.aws_iam_policy_document.iam_policy_document.json + policy = data.aws_iam_policy_document.role_policy.json +} + +data "aws_iam_policy_document" "role_policy" { + source_policy_documents = [ + data.aws_iam_policy_document.enos_scenario.json, + data.aws_iam_policy_document.aws_nuke.json, + ] +} + +data "aws_iam_policy_document" "aws_nuke" { + provider = aws.us_east_1 + + statement { + effect = "Allow" + actions = [ + "ec2:DescribeInternetGateways", + "ec2:DescribeNatGateways", + "ec2:DescribeRegions", + "ec2:DescribeVpnGateways", + "iam:DeleteAccessKey", + "iam:DeleteUser", + "iam:DeleteUserPolicy", + "iam:GetUser", + "iam:ListAccessKeys", + "iam:ListAccountAliases", + "iam:ListGroupsForUser", + "iam:ListUserPolicies", + "iam:ListUserTags", + "iam:ListUsers", + "iam:UntagUser", + "servicequotas:ListServiceQuotas" + ] + + resources = ["*"] + } } -data "aws_iam_policy_document" "iam_policy_document" { +data "aws_iam_policy_document" "enos_scenario" { provider = aws.us_east_1 + statement { effect = "Allow" actions = [ @@ -55,19 +92,27 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:AttachInternetGateway", "ec2:AuthorizeSecurityGroupEgress", "ec2:AuthorizeSecurityGroupIngress", + "ec2:CancelSpotFleetRequests", + "ec2:CancelSpotInstanceRequests", "ec2:CreateInternetGateway", "ec2:CreateKeyPair", + "ec2:CreateLaunchTemplate", + "ec2:CreateLaunchTemplateVersion", "ec2:CreateRoute", "ec2:CreateRouteTable", "ec2:CreateSecurityGroup", + "ec2:CreateSpotDatafeedSubscription", "ec2:CreateSubnet", "ec2:CreateTags", "ec2:CreateVolume", "ec2:CreateVPC", "ec2:DeleteInternetGateway", + "ec2:DeleteLaunchTemplate", + "ec2:DeleteLaunchTemplateVersions", "ec2:DeleteKeyPair", "ec2:DeleteRouteTable", "ec2:DeleteSecurityGroup", + "ec2:DeleteSpotDatafeedSubscription", "ec2:DeleteSubnet", "ec2:DeleteTags", "ec2:DeleteVolume", @@ -81,14 +126,22 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:DescribeInstanceTypeOfferings", "ec2:DescribeInstanceTypes", "ec2:DescribeInternetGateways", - "ec2:DescribeInternetGateways", "ec2:DescribeKeyPairs", + "ec2:DescribeLaunchTemplates", + "ec2:DescribeLaunchTemplateVersions", "ec2:DescribeNatGateways", "ec2:DescribeNetworkAcls", "ec2:DescribeNetworkInterfaces", "ec2:DescribeRegions", "ec2:DescribeRouteTables", "ec2:DescribeSecurityGroups", + "ec2:DescribeSpotDatafeedSubscription", + 
"ec2:DescribeSpotFleetInstances", + "ec2:DescribeSpotFleetInstanceRequests", + "ec2:DescribeSpotFleetRequests", + "ec2:DescribeSpotFleetRequestHistory", + "ec2:DescribeSpotInstanceRequests", + "ec2:DescribeSpotPriceHistory", "ec2:DescribeSubnets", "ec2:DescribeTags", "ec2:DescribeVolumes", @@ -99,14 +152,21 @@ data "aws_iam_policy_document" "iam_policy_document" { "ec2:DescribeVpnGateways", "ec2:DetachInternetGateway", "ec2:DisassociateRouteTable", + "ec2:GetLaunchTemplateData", + "ec2:GetSpotPlacementScores", "ec2:ImportKeyPair", "ec2:ModifyInstanceAttribute", + "ec2:ModifyLaunchTemplate", + "ec2:ModifySpotFleetRequest", "ec2:ModifySubnetAttribute", "ec2:ModifyVPCAttribute", + "ec2:RequestSpotInstances", + "ec2:RequestSpotFleet", "ec2:ResetInstanceAttribute", "ec2:RevokeSecurityGroupEgress", "ec2:RevokeSecurityGroupIngress", "ec2:RunInstances", + "ec2:SendSpotInstanceInterruptions", "ec2:TerminateInstances", "elasticloadbalancing:DescribeLoadBalancers", "elasticloadbalancing:DescribeTargetGroups", @@ -115,11 +175,10 @@ data "aws_iam_policy_document" "iam_policy_document" { "iam:CreateInstanceProfile", "iam:CreatePolicy", "iam:CreateRole", - "iam:CreateRole", + "iam:CreateServiceLinkedRole", "iam:DeleteInstanceProfile", "iam:DeletePolicy", "iam:DeleteRole", - "iam:DeleteRole", "iam:DeleteRolePolicy", "iam:DetachRolePolicy", "iam:GetInstanceProfile", @@ -132,7 +191,6 @@ data "aws_iam_policy_document" "iam_policy_document" { "iam:ListPolicies", "iam:ListRolePolicies", "iam:ListRoles", - "iam:ListRoles", "iam:PassRole", "iam:PutRolePolicy", "iam:RemoveRoleFromInstanceProfile", @@ -150,6 +208,7 @@ data "aws_iam_policy_document" "iam_policy_document" { "kms:ScheduleKeyDeletion", "servicequotas:ListServiceQuotas" ] + resources = ["*"] } } diff --git a/enos/ci/service-user-iam/service-quotas.tf b/enos/ci/service-user-iam/service-quotas.tf index 73a68363d84d..544f311504e7 100644 --- a/enos/ci/service-user-iam/service-quotas.tf +++ b/enos/ci/service-user-iam/service-quotas.tf @@ -1,33 +1,62 @@ locals { // This is the code of the service quota to request a change for. Each adjustable limit has a // unique code. 
See https://registry.terraform.io/providers/hashicorp/aws/latest/docs/resources/servicequotas_service_quota#quota_code - subnets_per_vps_quota = "L-F678F1CE" + subnets_per_vpcs_quota = "L-F678F1CE" + standard_spot_instance_requests_quota = "L-34B43A08" } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_1" { provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_east_2" { provider = aws.us_east_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_1" { provider = aws.us_west_1 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" value = 50 } resource "aws_servicequotas_service_quota" "vpcs_per_region_us_west_2" { provider = aws.us_west_2 - quota_code = local.subnets_per_vps_quota + quota_code = local.subnets_per_vpcs_quota service_code = "vpc" value = 50 } + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_1" { + provider = aws.us_east_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_east_2" { + provider = aws.us_east_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_1" { + provider = aws.us_west_1 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} + +resource "aws_servicequotas_service_quota" "spot_requests_per_region_us_west_2" { + provider = aws.us_west_2 + quota_code = local.standard_spot_instance_requests_quota + service_code = "ec2" + value = 640 +} diff --git a/enos/enos-modules.hcl b/enos/enos-modules.hcl index 26cb20dac4d6..9216fbf8f215 100644 --- a/enos/enos-modules.hcl +++ b/enos/enos-modules.hcl @@ -65,6 +65,27 @@ module "shutdown_multiple_nodes" { source = "./modules/shutdown_multiple_nodes" } +module "target_ec2_instances" { + source = "./modules/target_ec2_instances" + + common_tags = var.tags + instance_count = var.vault_instance_count + project_name = var.project_name + ssh_keypair = var.aws_ssh_keypair_name +} + +module "target_ec2_spot_fleet" { + source = "./modules/target_ec2_spot_fleet" + + common_tags = var.tags + instance_mem_min = 4096 + instance_cpu_min = 2 + project_name = var.project_name + // Current on-demand cost of t3.medium in us-east. 
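+ // Capping the maximum bid at the on-demand price keeps the worst-case cost + // at or below on-demand while still allowing spot capacity to be reclaimed.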
+ spot_price_max = "0.0416" + ssh_keypair = var.aws_ssh_keypair_name +} + module "vault_agent" { source = "./modules/vault_agent" @@ -72,7 +93,6 @@ module "vault_agent" { vault_instance_count = var.vault_instance_count } - module "vault_verify_agent_output" { source = "./modules/vault_verify_agent_output" @@ -80,15 +100,9 @@ module "vault_verify_agent_output" { } module "vault_cluster" { - source = "app.terraform.io/hashicorp-qti/aws-vault/enos" - # source = "../../terraform-enos-aws-vault" + source = "./modules/vault_cluster" - common_tags = var.tags - environment = "ci" - instance_count = var.vault_instance_count - project_name = var.project_name - ssh_aws_keypair = var.aws_ssh_keypair_name - vault_install_dir = var.vault_install_dir + install_dir = var.vault_install_dir } module "vault_get_cluster_ips" { diff --git a/enos/enos-scenario-agent.hcl b/enos/enos-scenario-agent.hcl index 8a7de6032dac..29a1204aae83 100644 --- a/enos/enos-scenario-agent.hcl +++ b/enos/enos-scenario-agent.hcl @@ -22,13 +22,18 @@ scenario "agent" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + packages = ["jq"] enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } install_artifactory_artifact = local.bundle_path == null + spot_price_max = { + // These prices are based on on-demand cost for t3.medium in us-east + "rhel" = "0.1016" + "ubuntu" = "0.0416" + } tags = merge({ "Project Name" : var.project_name "Project" : "Enos", @@ -51,21 +56,21 @@ scenario "agent" { module = "build_${matrix.artifact_source}" variables { - build_tags = try(var.vault_local_build_tags, local.build_tags[matrix.edition]) - bundle_path = local.bundle_path - goarch = matrix.arch - goos = "linux" - artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null - artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null - artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null - artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null - arch = matrix.artifact_source == "artifactory" ? matrix.arch : null - vault_product_version = var.vault_product_version - artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null - distro = matrix.artifact_source == "artifactory" ? matrix.distro : null - edition = matrix.artifact_source == "artifactory" ? matrix.edition : null - instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null - revision = var.vault_revision + build_tags = var.vault_local_build_tags != null ? var.vault_local_build_tags : local.build_tags[matrix.edition] + bundle_path = local.bundle_path + goarch = matrix.arch + goos = "linux" + artifactory_host = matrix.artifact_source == "artifactory" ? var.artifactory_host : null + artifactory_repo = matrix.artifact_source == "artifactory" ? var.artifactory_repo : null + artifactory_username = matrix.artifact_source == "artifactory" ? var.artifactory_username : null + artifactory_token = matrix.artifact_source == "artifactory" ? var.artifactory_token : null + arch = matrix.artifact_source == "artifactory" ? 
matrix.arch : null + product_version = var.vault_product_version + artifact_type = matrix.artifact_source == "artifactory" ? var.vault_artifact_type : null + distro = matrix.artifact_source == "artifactory" ? matrix.distro : null + edition = matrix.artifact_source == "artifactory" ? matrix.edition : null + instance_type = matrix.artifact_source == "artifactory" ? local.vault_instance_type : null + revision = var.vault_revision } } @@ -99,28 +104,29 @@ scenario "agent" { } } - step "create_backend_cluster" { - module = "backend_raft" + step "create_vault_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances depends_on = [step.create_vpc] providers = { - enos = provider.enos.ubuntu + enos = local.enos_provider[matrix.distro] } variables { - ami_id = step.create_vpc.ami_ids["ubuntu"]["amd64"] - common_tags = local.tags - instance_type = var.backend_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - vpc_id = step.create_vpc.vpc_id + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id } } step "create_vault_cluster" { module = module.vault_cluster depends_on = [ - step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { @@ -128,28 +134,25 @@ scenario "agent" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" - unseal_method = "shamir" - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = local.install_artifactory_artifact ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { VAULT_LOG_LEVEL = var.vault_log_level } + install_dir = var.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license + local_artifact_path = local.bundle_path + packages = local.packages + storage_backend = "raft" + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = "shamir" } } step "start_vault_agent" { module = "vault_agent" depends_on = [ - step.create_backend_cluster, step.build_vault, step.create_vault_cluster, ] @@ -159,8 +162,8 @@ } variables { - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_template_contents = "{{ with secret \\\"auth/token/lookup-self\\\" }}orphan={{ .Data.orphan }} display_name={{ .Data.display_name }}{{ end }}" } @@ -178,49 +181,64 @@ } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_agent_template_destination = "/tmp/agent_output.txt" vault_agent_expected_output = "orphan=true display_name=approle" } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "awskms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_priv_ips" { + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 + } + + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys 
hex" + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-autopilot.hcl b/enos/enos-scenario-autopilot.hcl index bf683b67f7a5..53861119863f 100644 --- a/enos/enos-scenario-autopilot.hcl +++ b/enos/enos-scenario-autopilot.hcl @@ -12,6 +12,12 @@ scenario "autopilot" { edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] artifact_type = ["package"] } + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } } terraform_cli = terraform_cli.default @@ -29,12 +35,17 @@ scenario "autopilot" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + packages = ["jq"] enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } + spot_price_max = { + // These prices are based on on-demand cost for t3.medium in us-east + "rhel" = "0.1016" + "ubuntu" = "0.0416" + } tags = merge({ "Project Name" : var.project_name "Project" : "Enos", @@ -105,36 +116,51 @@ scenario "autopilot" { } } - # This step creates a Vault cluster using a bundle downloaded from - # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release + step "create_vault_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ - step.create_vpc, step.build_vault, + step.create_vault_cluster_targets ] + providers = { enos = local.enos_provider[matrix.distro] } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = "raft" + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license : null + packages = local.packages + release = var.vault_autopilot_initial_release + storage_backend = "raft" storage_backend_addl_config = { autopilot_upgrade_version = var.vault_autopilot_initial_release.version } - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_autopilot_initial_release - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal } } @@ -152,12 +178,13 @@ scenario "autopilot" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } + step "verify_write_test_data" { module = module.vault_verify_write_data depends_on = [ @@ -172,9 +199,9 @@ scenario "autopilot" { variables { leader_public_ip = step.get_vault_cluster_ips.leader_public_ip leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -186,8 +213,25 @@ scenario "autopilot" { } } - # This step creates a new Vault cluster using a bundle or package - # from the matrix.artifact_source, with the var.vault_product_version + step "create_vault_cluster_upgrade_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + cluster_name = step.create_vault_cluster_targets.cluster_name + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + step "upgrade_vault_cluster_with_autopilot" { module = module.vault_cluster depends_on = [ @@ -202,28 +246,25 @@ scenario "autopilot" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } + force_unseal = matrix.seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + root_token = step.create_vault_cluster.root_token + shamir_unseal_keys = matrix.seal == "shamir" ? 
step.create_vault_cluster.unseal_keys_hex : null storage_backend = "raft" storage_backend_addl_config = step.create_autopilot_upgrade_storageconfig.storage_addl_config + storage_node_prefix = "upgrade_node" + target_hosts = step.create_vault_cluster_upgrade_targets.hosts unseal_method = matrix.seal - vault_cluster_tag = step.create_vault_cluster.vault_cluster_tag - vault_init = false - vault_install_dir = local.vault_install_dir - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_node_prefix = "upgrade_node" - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_when_no_init = matrix.seal == "shamir" - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } } } @@ -231,6 +272,7 @@ scenario "autopilot" { module = module.vault_verify_unsealed depends_on = [ step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, ] @@ -240,7 +282,7 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances + vault_instances = step.create_vault_cluster_upgrade_targets.hosts } } @@ -257,14 +299,15 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.upgrade_vault_cluster_with_autopilot.vault_root_token + vault_instances = step.create_vault_cluster_upgrade_targets.hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } step "verify_autopilot_await_server_removal_state" { module = module.vault_verify_autopilot depends_on = [ + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, step.verify_raft_auto_join_voter ] @@ -277,8 +320,8 @@ scenario "autopilot" { vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version vault_autopilot_upgrade_status = "await-server-removal" vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_upgrade_targets.hosts + vault_root_token = step.upgrade_vault_cluster_with_autopilot.root_token } } @@ -286,6 +329,7 @@ scenario "autopilot" { module = module.vault_get_cluster_ips depends_on = [ step.create_vault_cluster, + step.create_vault_cluster_upgrade_targets, step.get_vault_cluster_ips, step.upgrade_vault_cluster_with_autopilot ] @@ -295,11 +339,11 @@ scenario "autopilot" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - added_vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token node_public_ip = step.get_vault_cluster_ips.leader_public_ip + added_vault_instances = step.create_vault_cluster_targets.hosts } } @@ -326,6 +370,7 @@ scenario "autopilot" { step "raft_remove_peers" { module = module.vault_raft_remove_peer depends_on = [ + step.create_vault_cluster_upgrade_targets, step.get_updated_vault_cluster_ips, step.upgrade_vault_cluster_with_autopilot, step.verify_autopilot_await_server_removal_state @@ -336,11 +381,11 @@ scenario "autopilot" { } variables { - vault_install_dir = local.vault_install_dir operator_instance = step.get_updated_vault_cluster_ips.leader_public_ip - remove_vault_instances = step.create_vault_cluster.vault_instances + remove_vault_instances = step.create_vault_cluster_targets.hosts + vault_install_dir = local.vault_install_dir vault_instance_count = 3 - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -356,7 +401,7 @@ scenario "autopilot" { } variables { - old_vault_instances = step.create_vault_cluster.vault_instances + old_vault_instances = step.create_vault_cluster_targets.hosts vault_instance_count = 3 } } @@ -364,6 +409,7 @@ scenario "autopilot" { step "verify_autopilot_idle_state" { module = module.vault_verify_autopilot depends_on = [ + step.create_vault_cluster_upgrade_targets, step.upgrade_vault_cluster_with_autopilot, step.verify_raft_auto_join_voter, step.remove_old_nodes @@ -377,15 +423,16 @@ scenario "autopilot" { vault_autopilot_upgrade_version = matrix.artifact_source == "local" ? 
step.get_local_metadata.version : var.vault_product_version vault_autopilot_upgrade_status = "idle" vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_upgrade_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } step "verify_undo_logs_status" { - skip_step = try(semverconstraint(var.vault_product_version, "<1.13.0-0"), true) + skip_step = semverconstraint(var.vault_product_version, "<1.13.0-0") module = module.vault_verify_undo_logs depends_on = [ + step.create_vault_cluster_upgrade_targets, step.remove_old_nodes, step.upgrade_vault_cluster_with_autopilot, step.verify_autopilot_idle_state @@ -397,78 +444,78 @@ scenario "autopilot" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.upgrade_vault_cluster_with_autopilot.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_upgrade_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "awskms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_priv_ips" { - description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "private_ips" { + description = "The Vault cluster private IPs" + value = step.create_vault_cluster.private_ips } - output "vault_cluster_root_token" { - description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_unseal_keys_b64" { - description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + output "root_token" { + description = "The Vault cluster root token" + value = step.create_vault_cluster.root_token } - output "vault_cluster_recovery_key_shares" { + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_hex" { - description = "The Vault cluster unseal keys 
hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + output "unseal_keys_b64" { + description = "The Vault cluster unseal keys" + value = step.create_vault_cluster.unseal_keys_b64 } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "unseal_keys_hex" { + description = "The Vault cluster unseal keys hex" + value = step.create_vault_cluster.unseal_keys_hex } - output "upgraded_vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.upgrade_vault_cluster_with_autopilot.instance_ids + output "upgrade_hosts" { + description = "The Vault cluster target hosts" + value = step.upgrade_vault_cluster_with_autopilot.target_hosts } - output "upgraded_vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_public_ips + output "upgrade_private_ips" { + description = "The Vault cluster private IPs" + value = step.upgrade_vault_cluster_with_autopilot.private_ips } - output "upgraded_vault_cluster_priv_ips" { - description = "The Vault cluster private IPs" - value = step.upgrade_vault_cluster_with_autopilot.instance_private_ips + output "upgrade_public_ips" { + description = "The Vault cluster public IPs" + value = step.upgrade_vault_cluster_with_autopilot.public_ips } } diff --git a/enos/enos-scenario-replication.hcl b/enos/enos-scenario-replication.hcl index a6c81672eae3..655bf0f55131 100644 --- a/enos/enos-scenario-replication.hcl +++ b/enos/enos-scenario-replication.hcl @@ -19,6 +19,12 @@ scenario "replication" { edition = ["ent.fips1402", "ent.hsm.fips1402"] artifact_type = ["package"] } + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } } terraform_cli = terraform_cli.default @@ -36,12 +42,17 @@ scenario "replication" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + bundle_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_bundle_path) : null + packages = ["jq"] enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } + spot_price_max = { + // These prices are based on on-demand cost for t3.medium in us-east + "rhel" = "0.1016" + "ubuntu" = "0.0416" + } tags = merge({ "Project Name" : var.project_name "Project" : "Enos", @@ -131,37 +142,55 @@ scenario "replication" { } } - step "create_vault_primary_cluster" { + step "create_primary_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_primary_cluster" { module = module.vault_cluster depends_on = [ step.create_primary_backend_cluster, step.build_vault, + step.create_primary_cluster_targets ] + providers = { enos = local.enos_provider[matrix.distro] } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_primary_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag consul_release = matrix.primary_backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.primary_backend - unseal_method = matrix.primary_seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + storage_backend = matrix.primary_backend + target_hosts = step.create_primary_cluster_targets.hosts + unseal_method = matrix.primary_seal } } @@ -186,44 +215,62 @@ scenario "replication" { } } - step "create_vault_secondary_cluster" { + step "create_secondary_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + + step "create_secondary_cluster" { module = module.vault_cluster depends_on = [ step.create_secondary_backend_cluster, step.build_vault, + step.create_secondary_cluster_targets ] + providers = { enos = local.enos_provider[matrix.distro] } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_secondary_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } consul_cluster_tag = step.create_secondary_backend_cluster.consul_cluster_tag consul_release = matrix.secondary_backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.secondary_backend - unseal_method = matrix.secondary_seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = step.read_license.license - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + storage_backend = matrix.secondary_backend + target_hosts = step.create_secondary_cluster_targets.hosts + unseal_method = matrix.secondary_seal } } - step "verify_vault_primary_unsealed" { + step "verify_that_vault_primary_cluster_is_unsealed" { module = module.vault_verify_unsealed depends_on = [ - step.create_vault_primary_cluster + step.create_primary_cluster ] providers = { @@ -231,15 +278,15 @@ scenario "replication" { } variables { - vault_instances = step.create_vault_primary_cluster.vault_instances + vault_instances = step.create_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } - step "verify_vault_secondary_unsealed" { + step "verify_that_vault_secondary_cluster_is_unsealed" { module = module.vault_verify_unsealed depends_on = [ - step.create_vault_secondary_cluster + step.create_secondary_cluster ] providers = { @@ -247,42 +294,42 @@ scenario "replication" { } variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_instances = step.create_secondary_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } step "get_primary_cluster_ips" { module = module.vault_get_cluster_ips - depends_on = [step.verify_vault_primary_unsealed] + depends_on = [step.verify_that_vault_primary_cluster_is_unsealed] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_primary_cluster.vault_instances + vault_instances = step.create_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token } } step "get_secondary_cluster_ips" { module = module.vault_get_cluster_ips - depends_on = [step.verify_vault_secondary_unsealed] + depends_on = [step.verify_that_vault_secondary_cluster_is_unsealed] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_instances = step.create_secondary_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_secondary_cluster.vault_root_token + vault_root_token = step.create_secondary_cluster.root_token } } - step "verify_vault_primary_write_data" { + step "write_test_data_on_primary" { module = module.vault_verify_write_data depends_on = [step.get_primary_cluster_ips] @@ -294,9 +341,9 @@ scenario "replication" { variables { leader_public_ip = step.get_primary_cluster_ips.leader_public_ip leader_private_ip = step.get_primary_cluster_ips.leader_private_ip - vault_instances = step.create_vault_primary_cluster.vault_instances + vault_instances = step.create_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token } } @@ -304,7 +351,7 @@ scenario "replication" { module = module.vault_setup_perf_primary depends_on = [ step.get_primary_cluster_ips, - step.verify_vault_primary_write_data + step.write_test_data_on_primary ] providers = { @@ -315,7 +362,7 @@ scenario "replication" { primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip primary_leader_private_ip = step.get_primary_cluster_ips.leader_private_ip vault_install_dir = local.vault_install_dir - vault_root_token = 
step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token } } @@ -330,7 +377,7 @@ scenario "replication" { variables { primary_leader_public_ip = step.get_primary_cluster_ips.leader_public_ip vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token } } @@ -346,7 +393,7 @@ scenario "replication" { secondary_leader_public_ip = step.get_secondary_cluster_ips.leader_public_ip secondary_leader_private_ip = step.get_secondary_cluster_ips.leader_private_ip vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_secondary_cluster.vault_root_token + vault_root_token = step.create_secondary_cluster.root_token wrapping_token = step.generate_secondary_token.secondary_token } } @@ -356,8 +403,8 @@ scenario "replication" { step "unseal_secondary_followers" { module = module.vault_unseal_nodes depends_on = [ - step.create_vault_primary_cluster, - step.create_vault_secondary_cluster, + step.create_primary_cluster, + step.create_secondary_cluster, step.get_secondary_cluster_ips, step.configure_performance_replication_secondary ] @@ -369,12 +416,12 @@ scenario "replication" { variables { follower_public_ips = step.get_secondary_cluster_ips.follower_public_ips vault_install_dir = local.vault_install_dir - vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_vault_primary_cluster.vault_unseal_keys_hex : step.create_vault_primary_cluster.vault_recovery_keys_hex + vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_primary_cluster.unseal_keys_hex : step.create_primary_cluster.recovery_keys_hex vault_seal_type = matrix.primary_seal == "shamir" ? matrix.primary_seal : matrix.secondary_seal } } - step "verify_vault_secondary_unsealed_after_replication" { + step "verify_secondary_cluster_is_unsealed_after_enabling_replication" { module = module.vault_verify_unsealed depends_on = [ step.unseal_secondary_followers @@ -385,14 +432,14 @@ scenario "replication" { } variables { - vault_instances = step.create_vault_secondary_cluster.vault_instances + vault_instances = step.create_secondary_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } step "verify_performance_replication" { module = module.vault_verify_performance_replication - depends_on = [step.verify_vault_secondary_unsealed_after_replication] + depends_on = [step.verify_secondary_cluster_is_unsealed_after_enabling_replication] providers = { enos = local.enos_provider[matrix.distro] @@ -412,7 +459,7 @@ scenario "replication" { depends_on = [ step.verify_performance_replication, step.get_secondary_cluster_ips, - step.verify_vault_primary_write_data + step.write_test_data_on_primary ] providers = { @@ -425,13 +472,32 @@ scenario "replication" { } } - step "add_primary_cluster_nodes" { + step "create_more_primary_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + + step "add_more_nodes_to_primary_cluster" { module = 
module.vault_cluster depends_on = [ step.create_vpc, step.create_primary_backend_cluster, - step.create_vault_primary_cluster, - step.verify_replicated_data + step.create_primary_cluster, + step.verify_replicated_data, + step.create_more_primary_cluster_targets ] providers = { @@ -439,45 +505,42 @@ scenario "replication" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_primary_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } consul_cluster_tag = step.create_primary_backend_cluster.consul_cluster_tag consul_release = matrix.primary_backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.primary_backend - unseal_method = matrix.primary_seal - vault_cluster_tag = step.create_vault_primary_cluster.vault_cluster_tag - vault_init = false - vault_license = step.read_license.license - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_node_prefix = "newprimary_node" - vault_root_token = step.create_vault_primary_cluster.vault_root_token - vault_unseal_when_no_init = matrix.primary_seal == "shamir" - vault_unseal_keys = matrix.primary_seal == "shamir" ? step.create_vault_primary_cluster.vault_unseal_keys_hex : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } - } - } - - step "verify_add_node_unsealed" { + force_unseal = matrix.primary_seal == "shamir" + initialize_cluster = false + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + root_token = step.create_primary_cluster.root_token + shamir_unseal_keys = matrix.primary_seal == "shamir" ? 
step.create_primary_cluster.unseal_keys_hex : null + storage_backend = matrix.primary_backend + storage_node_prefix = "newprimary_node" + target_hosts = step.create_more_primary_cluster_targets.hosts + unseal_method = matrix.primary_seal + } + } + + step "verify_more_primary_nodes_unsealed" { module = module.vault_verify_unsealed - depends_on = [step.add_primary_cluster_nodes] + depends_on = [step.add_more_nodes_to_primary_cluster] providers = { enos = local.enos_provider[matrix.distro] } variables { - vault_instances = step.add_primary_cluster_nodes.vault_instances + vault_instances = step.create_more_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } @@ -486,9 +549,9 @@ scenario "replication" { skip_step = matrix.primary_backend != "raft" module = module.vault_verify_raft_auto_join_voter depends_on = [ - step.add_primary_cluster_nodes, - step.create_vault_primary_cluster, - step.verify_add_node_unsealed + step.add_more_nodes_to_primary_cluster, + step.create_primary_cluster, + step.verify_more_primary_nodes_unsealed ] providers = { @@ -496,9 +559,9 @@ } variables { - vault_instances = step.add_primary_cluster_nodes.vault_instances + vault_instances = step.create_more_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token } } @@ -506,7 +569,7 @@ module = module.shutdown_node depends_on = [ step.get_primary_cluster_ips, - step.verify_add_node_unsealed + step.verify_more_primary_nodes_unsealed ] providers = { @@ -537,7 +600,7 @@ step "get_updated_primary_cluster_ips" { module = module.vault_get_cluster_ips depends_on = [ - step.add_primary_cluster_nodes, + step.add_more_nodes_to_primary_cluster, step.remove_primary_follower_1, step.remove_primary_leader ] @@ -547,10 +610,10 @@ } variables { - vault_instances = step.create_vault_primary_cluster.vault_instances + vault_instances = step.create_primary_cluster_targets.hosts vault_install_dir = local.vault_install_dir - added_vault_instances = step.add_primary_cluster_nodes.vault_instances + added_vault_instances = step.create_more_primary_cluster_targets.hosts - vault_root_token = step.create_vault_primary_cluster.vault_root_token + vault_root_token = step.create_primary_cluster.root_token node_public_ip = step.get_primary_cluster_ips.follower_public_ip_2 } } @@ -572,112 +635,97 @@ } } - output "vault_primary_cluster_pub_ips" { - description = "The Vault primary cluster public IPs" - value = step.create_vault_primary_cluster.instance_public_ips + output "primary_cluster_hosts" { + description = "The Vault primary cluster target hosts" + value = step.create_primary_cluster_targets.hosts } - output "vault_primary_cluster_priv_ips" { - description = "The Vault primary cluster private IPs" - value = step.create_vault_primary_cluster.instance_private_ips + output "primary_cluster_additional_hosts" { + description = "The target hosts for the nodes added to the Vault primary cluster" + value = step.create_more_primary_cluster_targets.hosts } - output "vault_primary_newnode_pub_ip" { - description = "The Vault added new node on primary cluster public IP" - value = step.add_primary_cluster_nodes.instance_public_ips - } - - output "vault_primary_newnode_priv_ip" { - description = "The Vault added new node on primary cluster private IP" - value =
step.add_primary_cluster_nodes.instance_private_ips - } - - output "vault_primary_cluster_root_token" { + output "primary_cluster_root_token" { description = "The Vault primary cluster root token" - value = step.create_vault_primary_cluster.vault_root_token + value = step.create_primary_cluster.root_token } - output "vault_primary_cluster_unseal_keys_b64" { + output "primary_cluster_unseal_keys_b64" { description = "The Vault primary cluster unseal keys" - value = step.create_vault_primary_cluster.vault_unseal_keys_b64 + value = step.create_primary_cluster.unseal_keys_b64 } - output "vault_primary_cluster_unseal_keys_hex" { + output "primary_cluster_unseal_keys_hex" { description = "The Vault primary cluster unseal keys hex" - value = step.create_vault_primary_cluster.vault_unseal_keys_hex + value = step.create_primary_cluster.unseal_keys_hex } - output "vault_primary_cluster_recovery_key_shares" { + output "primary_cluster_recovery_key_shares" { description = "The Vault primary cluster recovery key shares" - value = step.create_vault_primary_cluster.vault_recovery_key_shares + value = step.create_primary_cluster.recovery_key_shares } - output "vault_primary_cluster_recovery_keys_b64" { + output "primary_cluster_recovery_keys_b64" { description = "The Vault primary cluster recovery keys b64" - value = step.create_vault_primary_cluster.vault_recovery_keys_b64 + value = step.create_primary_cluster.recovery_keys_b64 } - output "vault_primary_cluster_recovery_keys_hex" { + output "primary_cluster_recovery_keys_hex" { description = "The Vault primary cluster recovery keys hex" - value = step.create_vault_primary_cluster.vault_recovery_keys_hex + value = step.create_primary_cluster.recovery_keys_hex } - output "vault_secondary_cluster_pub_ips" { + output "secondary_cluster_hosts" { - description = "The Vault secondary cluster public IPs" + description = "The Vault secondary cluster target hosts" - value = step.create_vault_secondary_cluster.instance_public_ips - } - - output "vault_secondary_cluster_priv_ips" { - description = "The Vault secondary cluster private IPs" - value = step.create_vault_secondary_cluster.instance_private_ips + value = step.create_secondary_cluster_targets.hosts } - output "vault_primary_performance_replication_status" { + output "initial_primary_replication_status" { description = "The Vault primary cluster performance replication status" value = step.verify_performance_replication.primary_replication_status } - output "vault_replication_known_primary_cluster_addrs" { + output "initial_known_primary_cluster_addresses" { - description = "The Vault secondary cluster performance replication status" + description = "The known primary cluster addresses reported by the Vault secondary cluster" value = step.verify_performance_replication.known_primary_cluster_addrs } - output "vault_secondary_performance_replication_status" { + output "initial_secondary_performance_replication_status" { description = "The Vault secondary cluster performance replication status" value = step.verify_performance_replication.secondary_replication_status } - output "vault_primary_updated_performance_replication_status" { + output "initial_primary_replication_data_secondaries" { + description = "The Vault primary cluster secondaries connection status" + value = step.verify_performance_replication.primary_replication_data_secondaries + } + + output "initial_secondary_replication_data_primaries" { + description = "The Vault secondary cluster primaries connection status" + value = step.verify_performance_replication.secondary_replication_data_primaries + } + + output "updated_primary_replication_status" { description = "The Vault updated primary cluster 
performance replication status" value = step.verify_updated_performance_replication.primary_replication_status } - output "vault_updated_replication_known_primary_cluster_addrs" { + output "updated_known_primary_cluster_addresses" { description = "The Vault secondary cluster performance replication status" value = step.verify_updated_performance_replication.known_primary_cluster_addrs } - output "verify_secondary_updated_performance_replication_status" { + output "updated_secondary_replication_status" { description = "The Vault updated secondary cluster performance replication status" value = step.verify_updated_performance_replication.secondary_replication_status } - output "primary_replication_data_secondaries" { - description = "The Vault primary cluster secondaries connection status" - value = step.verify_performance_replication.primary_replication_data_secondaries - } - - output "secondary_replication_data_primaries" { - description = "The Vault secondary cluster primaries connection status" - value = step.verify_performance_replication.secondary_replication_data_primaries - } - - output "primary_updated_replication_data_secondaries" { + output "updated_primary_replication_data_secondaries" { description = "The Vault updated primary cluster secondaries connection status" value = step.verify_updated_performance_replication.primary_replication_data_secondaries } - output "secondary_updated_replication_data_primaries" { + output "updated_secondary_replication_data_primaries" { description = "The Vault updated secondary cluster primaries connection status" value = step.verify_updated_performance_replication.secondary_replication_data_primaries } diff --git a/enos/enos-scenario-smoke.hcl b/enos/enos-scenario-smoke.hcl index 717f275ffa73..9055b8cc12df 100644 --- a/enos/enos-scenario-smoke.hcl +++ b/enos/enos-scenario-smoke.hcl @@ -14,6 +14,12 @@ scenario "smoke" { edition = ["oss", "ent.fips1402", "ent.hsm.fips1402"] artifact_type = ["package"] } + + # Our local builder always creates bundles + exclude { + artifact_source = ["local"] + artifact_type = ["package"] + } } terraform_cli = terraform_cli.default @@ -32,12 +38,17 @@ scenario "smoke" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + bundle_path = matrix.artifact_source != "artifactory" ? 
abspath(var.vault_bundle_path) : null + packages = ["jq"] enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } + spot_price_max = { + // These prices are based on on-demand cost for t3.medium in us-east + "rhel" = "0.1016" + "ubuntu" = "0.0416" + } tags = merge({ "Project Name" : var.project_name "Project" : "Enos", @@ -134,11 +145,30 @@ scenario "smoke" { } } + step "create_vault_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { @@ -146,26 +176,24 @@ scenario "smoke" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags + artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag consul_release = matrix.backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? step.read_license.license : null + local_artifact_path = local.bundle_path + packages = local.packages + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal } } @@ -178,9 +206,9 @@ scenario "smoke" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -193,13 +221,13 @@ scenario "smoke" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_edition = matrix.edition vault_install_dir = local.vault_install_dir vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version vault_revision = matrix.artifact_source == "local" ? 
step.get_local_metadata.revision : var.vault_revision vault_build_date = matrix.artifact_source == "local" ? step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -213,7 +241,7 @@ scenario "smoke" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts } } @@ -231,9 +259,9 @@ scenario "smoke" { variables { leader_public_ip = step.get_vault_cluster_ips.leader_public_ip leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -248,8 +276,8 @@ scenario "smoke" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } @@ -264,7 +292,7 @@ scenario "smoke" { variables { vault_edition = matrix.edition vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts } } @@ -294,63 +322,63 @@ scenario "smoke" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "awskms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts } - output "vault_cluster_priv_ips" { + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_recovery_key_shares" { + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + 
value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_b64" { + output "unseal_keys_b64" { description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + value = step.create_vault_cluster.unseal_keys_b64 } - output "vault_cluster_unseal_keys_hex" { + output "unseal_keys_hex" { description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex - } - - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/enos-scenario-ui.hcl b/enos/enos-scenario-ui.hcl index 77335e6ad8f8..b1f56582702c 100644 --- a/enos/enos-scenario-ui.hcl +++ b/enos/enos-scenario-ui.hcl @@ -110,11 +110,29 @@ scenario "ui" { } } + step "create_vault_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = provider.enos.ubuntu + } + + variables { + ami_id = step.create_vpc.ami_ids[local.distro][local.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + vpc_id = step.create_vpc.vpc_id + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { @@ -122,20 +140,22 @@ scenario "ui" { } variables { - ami_id = step.create_vpc.ami_ids[local.distro][local.arch] - common_tags = local.tags - consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = local.seal - vault_local_artifact_path = local.bundle_path - vault_install_dir = local.vault_install_dir - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { VAULT_LOG_LEVEL = var.vault_log_level } + consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag + consul_release = matrix.backend == "consul" ? { + edition = var.backend_edition + version = local.consul_version + } : null + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license : null + local_artifact_path = local.bundle_path + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = local.seal } } @@ -143,52 +163,72 @@ scenario "ui" { module = module.vault_test_ui variables { - vault_addr = step.create_vault_cluster.instance_public_ips[0] - vault_root_token = step.create_vault_cluster.vault_root_token - vault_unseal_keys = step.create_vault_cluster.vault_recovery_keys_b64 - vault_recovery_threshold = step.create_vault_cluster.vault_recovery_threshold + vault_addr = step.create_vault_cluster_targets.hosts[0].public_ip + vault_root_token = step.create_vault_cluster.root_token + vault_unseal_keys = step.create_vault_cluster.recovery_keys_b64 + vault_recovery_threshold = step.create_vault_cluster.recovery_threshold ui_test_filter = local.ui_test_filter } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "awskms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name + } + + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts } - output "vault_cluster_priv_ips" { + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token + } + + output "recovery_key_shares" { + description = "The Vault cluster recovery key shares" + value = step.create_vault_cluster.recovery_key_shares + } + + output "recovery_keys_b64" { + description = "The Vault cluster recovery keys b64" + value = step.create_vault_cluster.recovery_keys_b64 + } + + output "recovery_keys_hex" { + description = "The Vault cluster recovery keys hex" + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_b64" { + output "unseal_keys_b64" { description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + value = step.create_vault_cluster.unseal_keys_b64 } - output "vault_cluster_unseal_keys_hex" { + output "unseal_keys_hex" { description = "The Vault cluster unseal keys hex" - value = step.create_vault_cluster.vault_unseal_keys_hex + value = step.create_vault_cluster.unseal_keys_hex } - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + output "ui_test_environment" { + value = step.test_ui.ui_test_environment + description = "The environment variables that are required in order to run the test:enos yarn target" } output "ui_test_stderr" { @@ -200,9 +240,4 @@ scenario "ui" { description = "The stdout of the ui tests that ran" value = step.test_ui.ui_test_stdout } - - output 
"ui_test_environment" { - value = step.test_ui.ui_test_environment - description = "The environment variables that are required in order to run the test:enos yarn target" - } } diff --git a/enos/enos-scenario-upgrade.hcl b/enos/enos-scenario-upgrade.hcl index 5a188695c5e4..5a8a52e6f048 100644 --- a/enos/enos-scenario-upgrade.hcl +++ b/enos/enos-scenario-upgrade.hcl @@ -32,12 +32,17 @@ scenario "upgrade" { "ent.hsm" = ["ui", "enterprise", "cgo", "hsm", "venthsm"] "ent.hsm.fips1402" = ["ui", "enterprise", "cgo", "hsm", "fips", "fips_140_2", "ent.hsm.fips1402"] } - bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null - dependencies_to_install = ["jq"] + bundle_path = matrix.artifact_source != "artifactory" ? abspath(var.vault_bundle_path) : null + packages = ["jq"] enos_provider = { rhel = provider.enos.rhel ubuntu = provider.enos.ubuntu } + spot_price_max = { + // These prices are based on on-demand cost for t3.medium in us-east + "rhel" = "0.1016" + "ubuntu" = "0.0416" + } tags = merge({ "Project Name" : var.project_name "Project" : "Enos", @@ -135,13 +140,30 @@ scenario "upgrade" { } } - # This step creates a Vault cluster using a bundle downloaded from - # releases.hashicorp.com, with the version specified in var.vault_autopilot_initial_release + step "create_vault_cluster_targets" { + module = module.target_ec2_spot_fleet // "target_ec2_instances" can be used for on-demand instances + depends_on = [step.create_vpc] + + providers = { + enos = local.enos_provider[matrix.distro] + } + + variables { + ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + common_tags = local.tags + instance_type = local.vault_instance_type // only used for on-demand instances + spot_price_max = local.spot_price_max[matrix.distro] + vpc_id = step.create_vpc.vpc_id + } + } + step "create_vault_cluster" { module = module.vault_cluster depends_on = [ step.create_backend_cluster, step.build_vault, + step.create_vault_cluster_targets ] providers = { @@ -149,25 +171,23 @@ scenario "upgrade" { } variables { - ami_id = step.create_vpc.ami_ids[matrix.distro][matrix.arch] - common_tags = local.tags + awskms_unseal_key_arn = step.create_vpc.kms_key_arn + cluster_name = step.create_vault_cluster_targets.cluster_name + config_env_vars = { + VAULT_LOG_LEVEL = var.vault_log_level + } consul_cluster_tag = step.create_backend_cluster.consul_cluster_tag consul_release = matrix.backend == "consul" ? { edition = var.backend_edition version = matrix.consul_version } : null - dependencies_to_install = local.dependencies_to_install - instance_type = local.vault_instance_type - kms_key_arn = step.create_vpc.kms_key_arn - storage_backend = matrix.backend - unseal_method = matrix.seal - vault_install_dir = local.vault_install_dir - vault_release = var.vault_upgrade_initial_release - vault_license = matrix.edition != "oss" ? step.read_license.license : null - vpc_id = step.create_vpc.vpc_id - vault_environment = { - VAULT_LOG_LEVEL = var.vault_log_level - } + install_dir = local.vault_install_dir + license = matrix.edition != "oss" ? 
step.read_license.license : null + packages = local.packages + release = var.vault_upgrade_initial_release + storage_backend = matrix.backend + target_hosts = step.create_vault_cluster_targets.hosts + unseal_method = matrix.seal } } @@ -180,9 +200,9 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -200,9 +220,9 @@ scenario "upgrade" { variables { leader_public_ip = step.get_vault_cluster_ips.leader_public_ip leader_private_ip = step.get_vault_cluster_ips.leader_private_ip - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -220,11 +240,11 @@ scenario "upgrade" { variables { vault_api_addr = "http://localhost:8200" - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_local_artifact_path = local.bundle_path vault_artifactory_release = matrix.artifact_source == "artifactory" ? step.build_vault.vault_artifactory_release : null vault_install_dir = local.vault_install_dir - vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.vault_unseal_keys_hex : null + vault_unseal_keys = matrix.seal == "shamir" ? step.create_vault_cluster.unseal_keys_hex : null vault_seal_type = matrix.seal } } @@ -241,13 +261,13 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_edition = matrix.edition vault_install_dir = local.vault_install_dir vault_product_version = matrix.artifact_source == "local" ? step.get_local_metadata.version : var.vault_product_version vault_revision = matrix.artifact_source == "local" ? step.get_local_metadata.revision : var.vault_revision vault_build_date = matrix.artifact_source == "local" ? 
step.get_local_metadata.build_date : var.vault_build_date - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -263,9 +283,9 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir - vault_root_token = step.create_vault_cluster.vault_root_token + vault_root_token = step.create_vault_cluster.root_token } } @@ -282,7 +302,7 @@ scenario "upgrade" { } variables { - vault_instances = step.create_vault_cluster.vault_instances + vault_instances = step.create_vault_cluster_targets.hosts vault_install_dir = local.vault_install_dir } } @@ -319,63 +339,63 @@ scenario "upgrade" { variables { vault_install_dir = local.vault_install_dir - vault_instances = step.create_vault_cluster.vault_instances - vault_root_token = step.create_vault_cluster.vault_root_token + vault_instances = step.create_vault_cluster_targets.hosts + vault_root_token = step.create_vault_cluster.root_token } } - output "vault_cluster_instance_ids" { - description = "The Vault cluster instance IDs" - value = step.create_vault_cluster.instance_ids + output "awskms_unseal_key_arn" { + description = "The Vault cluster KMS key arn" + value = step.create_vpc.kms_key_arn } - output "vault_cluster_pub_ips" { - description = "The Vault cluster public IPs" - value = step.create_vault_cluster.instance_public_ips + output "cluster_name" { + description = "The Vault cluster name" + value = step.create_vault_cluster.cluster_name } - output "vault_cluster_priv_ips" { + output "hosts" { + description = "The Vault cluster target hosts" + value = step.create_vault_cluster.target_hosts + } + + output "private_ips" { description = "The Vault cluster private IPs" - value = step.create_vault_cluster.instance_private_ips + value = step.create_vault_cluster.private_ips } - output "vault_cluster_key_id" { - description = "The Vault cluster Key ID" - value = step.create_vault_cluster.key_id + output "public_ips" { + description = "The Vault cluster public IPs" + value = step.create_vault_cluster.public_ips } - output "vault_cluster_root_token" { + output "root_token" { description = "The Vault cluster root token" - value = step.create_vault_cluster.vault_root_token + value = step.create_vault_cluster.root_token } - output "vault_cluster_recovery_key_shares" { + output "recovery_key_shares" { description = "The Vault cluster recovery key shares" - value = step.create_vault_cluster.vault_recovery_key_shares + value = step.create_vault_cluster.recovery_key_shares } - output "vault_cluster_recovery_keys_b64" { + output "recovery_keys_b64" { description = "The Vault cluster recovery keys b64" - value = step.create_vault_cluster.vault_recovery_keys_b64 + value = step.create_vault_cluster.recovery_keys_b64 } - output "vault_cluster_recovery_keys_hex" { + output "recovery_keys_hex" { description = "The Vault cluster recovery keys hex" - value = step.create_vault_cluster.vault_recovery_keys_hex + value = step.create_vault_cluster.recovery_keys_hex } - output "vault_cluster_unseal_keys_b64" { + output "unseal_keys_b64" { description = "The Vault cluster unseal keys" - value = step.create_vault_cluster.vault_unseal_keys_b64 + value = step.create_vault_cluster.unseal_keys_b64 } - output "vault_cluster_unseal_keys_hex" { + output "unseal_keys_hex" { description = "The Vault cluster unseal keys hex" - value = 
step.create_vault_cluster.vault_unseal_keys_hex - } - - output "vault_cluster_tag" { - description = "The Vault cluster tag" - value = step.create_vault_cluster.vault_cluster_tag + value = step.create_vault_cluster.unseal_keys_hex } } diff --git a/enos/modules/target_ec2_instances/main.tf b/enos/modules/target_ec2_instances/main.tf new file mode 100644 index 000000000000..26a520d3e8ce --- /dev/null +++ b/enos/modules/target_ec2_instances/main.tf @@ -0,0 +1,181 @@ +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_kms_key" "kms_key" { + key_id = var.awskms_unseal_key_arn +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + statement { + resources = [var.awskms_unseal_key_arn] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } +} + +data "aws_iam_policy_document" "target_instance_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +locals { + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}" +} + +resource "aws_iam_role" "target_instance_role" { + name = "target_instance_role-${random_string.cluster_name.result}" + assume_role_policy = data.aws_iam_policy_document.target_instance_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target" + role = aws_iam_role.target_instance_role.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target" + role = aws_iam_role.target_instance_role.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8301 + to_port = 8301 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8301 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", 
data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), ]) } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_instance" "targets" { + for_each = local.instances + ami = var.ami_id + instance_type = var.instance_type + vpc_security_group_ids = [aws_security_group.target.id] + subnet_id = tolist(data.aws_subnets.vpc.ids)[each.key % length(data.aws_subnets.vpc.ids)] + key_name = var.ssh_keypair + iam_instance_profile = aws_iam_instance_profile.target.name + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-target-instance" + Type = local.cluster_name + }, + ) +} diff --git a/enos/modules/target_ec2_instances/outputs.tf b/enos/modules/target_ec2_instances/outputs.tf new file mode 100644 index 000000000000..9428bfdb9915 --- /dev/null +++ b/enos/modules/target_ec2_instances/outputs.tf @@ -0,0 +1,11 @@ +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The EC2 instance target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = aws_instance.targets[idx].public_ip + private_ip = aws_instance.targets[idx].private_ip + } } +}
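+# For example, with the default instance_count of 3, scenario steps that
+# receive this map as target_hosts or vault_instances see roughly the
+# following shape (addresses are hypothetical) and index into it like
+# hosts[0].public_ip:
+#   {
+#     "0" = { public_ip = "203.0.113.10", private_ip = "10.13.16.10" }
+#     "1" = { public_ip = "203.0.113.11", private_ip = "10.13.16.11" }
+#     "2" = { public_ip = "203.0.113.12", private_ip = "10.13.16.12" }
+#   }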
diff --git a/enos/modules/target_ec2_instances/variables.tf b/enos/modules/target_ec2_instances/variables.tf new file mode 100644 index 000000000000..89dbbf03c776 --- /dev/null +++ b/enos/modules/target_ec2_instances/variables.tf @@ -0,0 +1,61 @@ +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "awskms_unseal_key_arn" { + type = string + description = "The AWSKMS key ARN if using the awskms unseal method. If specified the instances will be granted kms permissions to the key" + default = null +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { "Project" : "Enos" } +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "instance_type" { + description = "The instance machine type" + type = string + default = "t3.small" +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "spot_price_max" { + description = "Unused shim variable to match target_ec2_spot_fleet" + type = string + default = null +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/target_ec2_spot_fleet/main.tf b/enos/modules/target_ec2_spot_fleet/main.tf new file mode 100644 index 000000000000..4e55da2dd095 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/main.tf @@ -0,0 +1,388 @@ +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.24" + } + } +} + +data "aws_vpc" "vpc" { + id = var.vpc_id +} + +data "aws_subnets" "vpc" { + filter { + name = "vpc-id" + values = [var.vpc_id] + } +} + +data "aws_kms_key" "kms_key" { + key_id = var.awskms_unseal_key_arn +} + +data "aws_iam_policy_document" "target" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeInstances", + "secretsmanager:*" + ] + } + + statement { + resources = [var.awskms_unseal_key_arn] + + actions = [ + "kms:DescribeKey", + "kms:ListKeys", + "kms:Encrypt", + "kms:Decrypt", + "kms:GenerateDataKey" + ] + } +} + +data "aws_iam_policy_document" "target_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["ec2.amazonaws.com"] + } + } +} + +data "aws_iam_policy_document" "fleet" { + statement { + resources = ["*"] + + actions = [ + "ec2:DescribeImages", + "ec2:DescribeSubnets", + "ec2:RequestSpotInstances", + "ec2:TerminateInstances", + "ec2:DescribeInstanceStatus", + "ec2:CancelSpotFleetRequests", + "ec2:CreateTags", + "ec2:RunInstances", + "ec2:StartInstances", + "ec2:StopInstances", + ] + } + + statement { + effect = "Deny" + + resources = [ + "arn:aws:ec2:*:*:instance/*", + ] + + actions = [ + "ec2:RunInstances", + ] + + condition { + test = "StringNotEquals" + variable = "ec2:InstanceMarketType" + values = ["spot"] + } + } + + statement { + resources = ["*"] + + actions = [ + "iam:PassRole", + ] + + condition { + test = "StringEquals" + variable = "iam:PassedToService" + values = [ + "ec2.amazonaws.com", + ] + } + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:loadbalancer/*", + ] + + actions = [ + "elasticloadbalancing:RegisterInstancesWithLoadBalancer", + ] + } + + statement { + resources = [ + "arn:aws:elasticloadbalancing:*:*:*/*" + ] + + actions = [ + "elasticloadbalancing:RegisterTargets" + ] + } +} + +data "aws_iam_policy_document" "fleet_role" { + statement { + actions = ["sts:AssumeRole"] + + principals { + type = "Service" + identifiers = ["spotfleet.amazonaws.com"] + } + } +} + +data "enos_environment" "localhost" {} + +resource "random_string" "cluster_name" { + length = 8 + lower = true + upper = false + numeric = false + special = false +} + +resource "random_string" "unique_id" { + length = 4 + lower = true + upper = false + numeric = false + special = false +} + +locals { + instances = toset([for idx in range(var.instance_count) : tostring(idx)]) + cluster_name = coalesce(var.cluster_name, random_string.cluster_name.result) + name_prefix = "${var.project_name}-${local.cluster_name}-${random_string.unique_id.result}" + fleet_tag = "${local.name_prefix}-spot-fleet-target" + fleet_tags = { + Name = "${local.name_prefix}-target" + Type = local.cluster_name + SpotFleet = local.fleet_tag + } +}
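+# Note that name_prefix carries the random unique_id suffix, which should keep
+# the IAM role, instance profile, and policy names below unique when multiple
+# scenario runs share an AWS account, since IAM names are account-global.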
+resource "aws_iam_role" "target" { + name = "${local.name_prefix}-target-role" + assume_role_policy = data.aws_iam_policy_document.target_role.json +} + +resource "aws_iam_instance_profile" "target" { + name = "${local.name_prefix}-target-profile" + role = aws_iam_role.target.name +} + +resource "aws_iam_role_policy" "target" { + name = "${local.name_prefix}-target-policy" + role = aws_iam_role.target.id + policy = data.aws_iam_policy_document.target.json +} + +resource "aws_iam_role" "fleet" { + name = "${local.name_prefix}-fleet-role" + assume_role_policy = data.aws_iam_policy_document.fleet_role.json +} + +resource "aws_iam_role_policy" "fleet" { + name = "${local.name_prefix}-fleet-policy" + role = aws_iam_role.fleet.id + policy = data.aws_iam_policy_document.fleet.json +} + +resource "aws_security_group" "target" { + name = "${local.name_prefix}-target" + description = "Target instance security group" + vpc_id = var.vpc_id + + # SSH traffic + ingress { + from_port = 22 + to_port = 22 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Vault traffic + ingress { + from_port = 8200 + to_port = 8201 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + formatlist("%s/32", var.ssh_allow_ips) + ]) + } + + # Consul traffic + ingress { + from_port = 8301 + to_port = 8301 + protocol = "tcp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + ingress { + from_port = 8301 + to_port = 8301 + protocol = "udp" + cidr_blocks = flatten([ + formatlist("%s/32", data.enos_environment.localhost.public_ip_addresses), + join(",", data.aws_vpc.vpc.cidr_block_associations.*.cidr_block), + ]) + } + + # Internal traffic + ingress { + from_port = 0 + to_port = 0 + protocol = "-1" + self = true + } + + # External traffic + egress { + from_port = 0 + to_port = 0 + protocol = "-1" + cidr_blocks = ["0.0.0.0/0"] + } + + tags = merge( + var.common_tags, + { + Name = "${local.name_prefix}-sg" + }, + ) +} + +resource "aws_launch_template" "target" { + name = "${local.name_prefix}-target" + image_id = var.ami_id + key_name = var.ssh_keypair + + iam_instance_profile { + name = aws_iam_instance_profile.target.name + } + + network_interfaces { + associate_public_ip_address = true + delete_on_termination = true + security_groups = [aws_security_group.target.id] + } + + tag_specifications { + resource_type = "instance" + + tags = merge( + var.common_tags, + local.fleet_tags, + ) + } +} + +# There are three primary knobs we can turn to try and optimize our costs by
+# using a spot fleet: our min and max instance requirements, our max bid
+# price, and the allocation strategy to use when fulfilling the spot request.
+# We've currently configured our instance requirements to allow for anywhere
+# from 2-4 vCPUs and 4-16GB of RAM. We intentionally have a wide range
+# to allow for a large instance size pool to be considered. Our next knob is our
+# max bid price. As we're using spot fleets to save on instance cost, we never
+# want to pay more for an instance than we would pay on-demand. We've set the max price
+# to equal what we pay for t3.medium instances on-demand, which are the smallest
+# reliable size for Vault scenarios. The final knob is the allocation strategy
+# that AWS will use when looking for instances that meet our resource and cost
+# requirements. We're using the "lowestPrice" strategy to get the absolute
+# cheapest machines that will fit the requirements, but it comes with a slightly
+# higher capacity risk than, say, "capacityOptimized" or "priceCapacityOptimized".
+# Unless we see capacity issues or instances being shut down, we ought to
+# stick with that strategy.
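+# As a concrete example, the scenarios currently pass spot_price_max values of
+# "0.0416" (ubuntu) and "0.1016" (rhel), the approximate us-east t3.medium
+# on-demand rates noted in the scenario locals, so a fulfilled spot instance
+# should never cost more than its on-demand equivalent.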
We've set the max price +# to equal what we pay for t3.medium instances on-demand, which is the smallest +# reliable size for Vault scenarios. The final knob is the allocation strategy +# that AWS will use when looking for instances that meet our resource and cost +# requirements. We're using the "lowestPrice" strategy to get the absolute +# cheapest machines that will fit the requirements, but it comes with a slightly +# higher capacity risk than say, "capacityOptimized" or "priceCapacityOptimized". +# Unless we see capacity issues or instances being shut down, we ought to +# stick with that strategy. +resource "aws_spot_fleet_request" "targets" { + allocation_strategy = "lowestPrice" + fleet_type = "request" + iam_fleet_role = aws_iam_role.fleet.arn + // Set this to zero so re-runs don't plan for replacement + instance_pools_to_use_count = 0 + target_capacity = var.instance_count + terminate_instances_on_delete = true + wait_for_fulfillment = true + + launch_template_config { + launch_template_specification { + id = aws_launch_template.target.id + version = aws_launch_template.target.latest_version + } + + overrides { + spot_price = var.spot_price_max + subnet_id = data.aws_subnets.vpc.ids[0] + + instance_requirements { + burstable_performance = "included" + + memory_mib { + min = var.instance_mem_min + max = var.instance_mem_max + } + + vcpu_count { + min = var.instance_cpu_min + max = var.instance_cpu_max + } + } + } + } + + tags = merge( + var.common_tags, + local.fleet_tags, + ) +} + +data "aws_instances" "targets" { + depends_on = [ + aws_spot_fleet_request.targets, + ] + + instance_tags = local.fleet_tags + instance_state_names = [ + "pending", + "running", + ] + + filter { + name = "image-id" + values = [var.ami_id] + } + + filter { + name = "iam-instance-profile.arn" + values = [aws_iam_instance_profile.target.arn] + } +} + +data "aws_instance" "targets" { + depends_on = [ + aws_spot_fleet_request.targets, + data.aws_instances.targets + ] + for_each = local.instances + + instance_id = data.aws_instances.targets.ids[each.key] +} diff --git a/enos/modules/target_ec2_spot_fleet/outputs.tf b/enos/modules/target_ec2_spot_fleet/outputs.tf new file mode 100644 index 000000000000..2248388da521 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/outputs.tf @@ -0,0 +1,11 @@ +output "cluster_name" { + value = local.cluster_name +} + +output "hosts" { + description = "The spot fleet target hosts" + value = { for idx in range(var.instance_count) : idx => { + public_ip = data.aws_instance.targets[idx].public_ip + private_ip = data.aws_instance.targets[idx].private_ip + } } +} diff --git a/enos/modules/target_ec2_spot_fleet/variables.tf b/enos/modules/target_ec2_spot_fleet/variables.tf new file mode 100644 index 000000000000..da41866554f8 --- /dev/null +++ b/enos/modules/target_ec2_spot_fleet/variables.tf @@ -0,0 +1,88 @@ +variable "ami_id" { + description = "The machine image identifier" + type = string +} + +variable "awskms_unseal_key_arn" { + type = string + description = "The AWSKMS key ARN if using the awskms unseal method. If specified, the instances will be granted KMS permissions to the key" + default = null +} + +variable "cluster_name" { + type = string + description = "A unique cluster identifier" + default = null +} + +variable "common_tags" { + description = "Common tags for cloud resources" + type = map(string) + default = { + Project = "Vault" + } +} + +variable "instance_mem_min" { + description = "The minimum amount of memory in mebibytes for each instance in the fleet.
(1 MiB = 1,048,576 bytes)" + type = number + default = 4096 // ~4 GB +} + +variable "instance_mem_max" { + description = "The maximum amount of memory in mebibytes for each instance in the fleet. (1 MiB = 1,048,576 bytes)" + type = number + default = 16385 // ~16 GB +} + +variable "instance_cpu_min" { + description = "The minimum number of vCPUs for each instance in the fleet" + type = number + default = 2 +} + +variable "instance_cpu_max" { + description = "The maximum number of vCPUs for each instance in the fleet" + type = number + default = 8 // Unlikely we'll ever get that high due to spot price bid protection +} + +variable "instance_count" { + description = "The number of target instances to create" + type = number + default = 3 +} + +variable "instance_type" { + description = "Shim variable for compatibility with other target modules; it is not used, as the spot fleet determines instance sizes" + type = string + default = null +} + +variable "project_name" { + description = "A unique project name" + type = string +} + +variable "spot_price_max" { + description = "The maximum hourly price to pay for each target instance" + type = string + // Current on-demand cost of a Linux t3.medium in us-east. + default = "0.0416" +} + +variable "ssh_allow_ips" { + description = "Allowlisted IP addresses for SSH access to target nodes. The IP address of the machine running Enos will automatically be allowlisted" + type = list(string) + default = [] +} + +variable "ssh_keypair" { + description = "SSH keypair used to connect to EC2 instances" + type = string +} + +variable "vpc_id" { + description = "The identifier of the VPC where the target instances will be created" + type = string +} diff --git a/enos/modules/vault_cluster/main.tf b/enos/modules/vault_cluster/main.tf new file mode 100644 index 000000000000..08455ca8f648 --- /dev/null +++ b/enos/modules/vault_cluster/main.tf @@ -0,0 +1,335 @@ +terraform { + required_providers { + # We need to specify the provider source in each module until we publish it + # to the public registry + enos = { + source = "app.terraform.io/hashicorp-qti/enos" + version = ">= 0.3.2" + } + } +} + +data "enos_environment" "localhost" {} + +locals { + bin_path = "${var.install_dir}/vault" + consul_bin_path = "${var.consul_install_dir}/consul" + key_shares = { + "awskms" = null + "shamir" = 5 + } + key_threshold = { + "awskms" = null + "shamir" = 3 + } + // In order to get Terraform to plan we have to use collections with keys + // that are known at plan time, so for our module to work var.target_hosts + // must be a map with known keys. Here we're creating locals that keep + // track of index values that point to our target hosts. + followers = toset(slice(local.instances, 1, length(local.instances))) + instances = [for idx in range(length(var.target_hosts)) : tostring(idx)] + leader = toset(slice(local.instances, 0, 1)) + recovery_shares = { + "awskms" = 5 + "shamir" = null + } + recovery_threshold = { + "awskms" = 3 + "shamir" = null + } + seal = { + "awskms" = { + type = "awskms" + attributes = { + kms_key_id = var.awskms_unseal_key_arn + } + } + "shamir" = { + type = "shamir" + attributes = null + } + } + storage_config = [for idx, host in var.target_hosts : (var.storage_backend == "raft" ?
+ merge( + { + node_id = "${var.storage_node_prefix}_${idx}" + }, + var.storage_backend_addl_config + ) : + { + address = "127.0.0.1:8500" + path = "vault" + }) + ] +} + +resource "enos_remote_exec" "install_packages" { + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if length(var.packages) > 0 + } + + content = templatefile("${path.module}/templates/install-packages.sh", { + packages = join(" ", var.packages) + }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "consul" { + for_each = { + for idx, host in var.target_hosts : idx => var.target_hosts[idx] + if var.storage_backend == "consul" + } + + destination = var.consul_install_dir + release = merge(var.consul_release, { product = "consul" }) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_bundle_install" "vault" { + for_each = var.target_hosts + + destination = var.install_dir + release = var.release == null ? var.release : merge({ product = "vault" }, var.release) + artifactory = var.artifactory_release + path = var.local_artifact_path + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_consul_start" "consul" { + for_each = enos_bundle_install.consul + + bin_path = local.consul_bin_path + data_dir = var.consul_data_dir + config = { + data_dir = var.consul_data_dir + datacenter = "dc1" + retry_join = ["provider=aws tag_key=Type tag_value=${var.consul_cluster_tag}"] + server = false + bootstrap_expect = 0 + log_level = "INFO" + log_file = var.consul_log_file + } + unit_name = "consul" + username = "consul" + + transport = { + ssh = { + host = var.target_hosts[each.key].public_ip + } + } +} + +resource "enos_vault_start" "leader" { + depends_on = [ + enos_consul_start.consul, + enos_bundle_install.vault, + ] + for_each = local.leader + + bin_path = local.bin_path + config_dir = var.config_dir + environment = var.config_env_vars + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + storage = { + type = var.storage_backend + attributes = ({ for key, value in local.storage_config[each.key] : key => value }) + } + seal = local.seal[var.unseal_method] + ui = true + } + license = var.license + manage_service = var.manage_service + username = "vault" + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_vault_start" "followers" { + depends_on = [ + enos_vault_start.leader, + ] + for_each = local.followers + + bin_path = local.bin_path + config_dir = var.config_dir + environment = var.config_env_vars + config = { + api_addr = "http://${var.target_hosts[each.value].private_ip}:8200" + cluster_addr = "http://${var.target_hosts[each.value].private_ip}:8201" + cluster_name = var.cluster_name + listener = { + type = "tcp" + attributes = { + address = "0.0.0.0:8200" + tls_disable = "true" + } + } + storage = { + type = var.storage_backend + attributes = { for key, value in local.storage_config[each.key] : key => value } + } + seal = local.seal[var.unseal_method] + ui = true + } + license = var.license + manage_service = var.manage_service + username = "vault" + unit_name = "vault" + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource 
"enos_vault_init" "leader" { + depends_on = [ + enos_vault_start.followers, + ] + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = enos_vault_start.leader[0].config.api_addr + + key_shares = local.key_shares[var.unseal_method] + key_threshold = local.key_threshold[var.unseal_method] + + recovery_shares = local.recovery_shares[var.unseal_method] + recovery_threshold = local.recovery_threshold[var.unseal_method] + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +resource "enos_vault_unseal" "leader" { + depends_on = [ + enos_vault_start.followers, + enos_vault_init.leader, + ] + for_each = enos_vault_init.leader // only unseal the leader if we initialized it + + bin_path = local.bin_path + vault_addr = enos_vault_start.leader[each.key].config.api_addr + seal_type = var.unseal_method + unseal_keys = var.unseal_method != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.target_hosts[tolist(local.leader)[0]].public_ip + } + } +} + +resource "enos_vault_unseal" "followers" { + depends_on = [ + enos_vault_init.leader, + enos_vault_unseal.leader, + ] + // Only unseal followers if we're not using an auto-unseal method and we've + // initialized the cluster + for_each = toset([ + for idx, follower in local.followers : follower + if var.unseal_method == "shamir" && var.initialize_cluster + ]) + + bin_path = local.bin_path + vault_addr = enos_vault_start.followers[each.key].config.api_addr + seal_type = var.unseal_method + unseal_keys = var.unseal_method != "shamir" ? null : coalesce(var.shamir_unseal_keys, enos_vault_init.leader[0].unseal_keys_hex) + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} + +// Force unseal the cluster. This is used if the vault-cluster module is used +// to add additional nodes to a cluster via auto-pilot, or some other means. +// When that happens we'll want to set initialize_cluster to false and +// force_unseal to true. 
+resource "enos_vault_unseal" "maybe_force_unseal" { + depends_on = [ + enos_vault_start.followers, + ] + for_each = { + for idx, host in var.target_hosts : idx => host + if var.force_unseal && !var.initialize_cluster + } + + bin_path = local.bin_path + vault_addr = "http://localhost:8200" + seal_type = var.unseal_method + unseal_keys = coalesce( + var.shamir_unseal_keys, + try(enos_vault_init.leader[0].unseal_keys_hex, null), + ) + + transport = { + ssh = { + host = each.value.public_ip + } + } +} + +resource "enos_remote_exec" "vault_write_license" { + for_each = toset([ + for idx, leader in local.leader : leader + if var.initialize_cluster + ]) + + depends_on = [ + enos_vault_unseal.leader, + enos_vault_unseal.maybe_force_unseal, + ] + + content = templatefile("${path.module}/templates/vault-write-license.sh", { + bin_path = local.bin_path, + root_token = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") + license = coalesce(var.license, "none") + }) + + transport = { + ssh = { + host = var.target_hosts[each.value].public_ip + } + } +} diff --git a/enos/modules/vault_cluster/outputs.tf b/enos/modules/vault_cluster/outputs.tf new file mode 100644 index 000000000000..8e72ef44668b --- /dev/null +++ b/enos/modules/vault_cluster/outputs.tf @@ -0,0 +1,55 @@ +output "public_ips" { + description = "Vault cluster target host public_ips" + value = [for host in var.target_hosts : host.public_ip] +} + +output "private_ips" { + description = "Vault cluster target host private_ips" + value = [for host in var.target_hosts : host.private_ip] +} + +output "target_hosts" { + description = "The vault cluster instances that were created" + + value = var.target_hosts +} +output "root_token" { + value = coalesce(var.root_token, try(enos_vault_init.leader[0].root_token, null), "none") +} + +output "unseal_keys_b64" { + value = try(enos_vault_init.leader[0].unseal_keys_b64, []) +} + +output "unseal_keys_hex" { + value = try(enos_vault_init.leader[0].unseal_keys_hex, null) +} + +output "unseal_shares" { + value = try(enos_vault_init.leader[0].unseal_keys_shares, -1) +} + +output "unseal_threshold" { + value = try(enos_vault_init.leader[0].unseal_keys_threshold, -1) +} + +output "recovery_keys_b64" { + value = try(enos_vault_init.leader[0].recovery_keys_b64, []) +} + +output "recovery_keys_hex" { + value = try(enos_vault_init.leader[0].recovery_keys_hex, []) +} + +output "recovery_key_shares" { + value = try(enos_vault_init.leader[0].recovery_keys_shares, -1) +} + +output "recovery_threshold" { + value = try(enos_vault_init.leader[0].recovery_keys_threshold, -1) +} + +output "cluster_name" { + description = "The Vault cluster name" + value = var.cluster_name +} diff --git a/enos/modules/vault_cluster/templates/install-packages.sh b/enos/modules/vault_cluster/templates/install-packages.sh new file mode 100755 index 000000000000..61b6a1272d09 --- /dev/null +++ b/enos/modules/vault_cluster/templates/install-packages.sh @@ -0,0 +1,44 @@ +#!/bin/bash + +set -ex -o pipefail + +packages="${packages}" + +if [ "$packages" == "" ] +then + echo "No dependencies to install." + exit 0 +fi + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? 
+ wait=$((2 ** count)) + count=$((count + 1)) + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +echo "Installing Dependencies: $packages" +if [ -f /etc/debian_version ]; then + # Make sure cloud-init is not modifying our sources list while we're trying + # to install. + retry 7 grep ec2 /etc/apt/sources.list + + cd /tmp + retry 5 sudo apt update + retry 5 sudo apt install -y "$${packages[@]}" +else + cd /tmp + retry 7 sudo yum -y install "$${packages[@]}" +fi diff --git a/enos/modules/vault_cluster/templates/vault-write-license.sh b/enos/modules/vault_cluster/templates/vault-write-license.sh new file mode 100755 index 000000000000..10de84ba56f1 --- /dev/null +++ b/enos/modules/vault_cluster/templates/vault-write-license.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +license='${license}' +if test "$license" = "none"; then + exit 0 +fi + +function retry { + local retries=$1 + shift + local count=0 + + until "$@"; do + exit=$? + wait=$((2 ** count)) + count=$((count + 1)) + + if [ "$count" -lt "$retries" ]; then + sleep "$wait" + else + return "$exit" + fi + done + + return 0 +} + +export VAULT_ADDR=http://localhost:8200 +export VAULT_TOKEN='${root_token}' + +# Temporary hack until we can make the unseal resource handle legacy license +# setting. If we're running 1.8 or above, we shouldn't try to set a license. +ver=$(${bin_path} version) +if [[ "$(echo "$ver" |awk '{print $2}' |awk -F'.' '{print $2}')" -ge 8 ]]; then + exit 0 +fi + +retry 5 ${bin_path} write /sys/license text="$license" diff --git a/enos/modules/vault_cluster/variables.tf b/enos/modules/vault_cluster/variables.tf new file mode 100644 index 000000000000..d7be243e2efb --- /dev/null +++ b/enos/modules/vault_cluster/variables.tf @@ -0,0 +1,176 @@ +variable "artifactory_release" { + type = object({ + username = string + token = string + url = string + sha256 = string + }) + description = "The Artifactory release information to install Vault artifacts from Artifactory" + default = null +} + +variable "awskms_unseal_key_arn" { + type = string + description = "The AWSKMS key ARN if using the awskms unseal method" + default = null +} + +variable "cluster_name" { + type = string + description = "The Vault cluster name" + default = null +} + +variable "config_dir" { + type = string + description = "The directory to use for Vault configuration" + default = "/etc/vault.d" +} + +variable "config_env_vars" { + description = "Optional Vault configuration environment variables to set when starting Vault" + type = map(string) + default = null +} + +variable "consul_cluster_tag" { + type = string + description = "The retry_join tag to use for Consul" + default = null +} + +variable "consul_data_dir" { + type = string + description = "The directory where Consul will store data" + default = "/opt/consul/data" +} + +variable "consul_install_dir" { + type = string + description = "The directory where the Consul binary will be installed" + default = "/opt/consul/bin" +} + +variable "consul_log_file" { + type = string + description = "The file where Consul will write log output" + default = "/var/log/consul.log" +} + +variable "consul_release" { + type = object({ + version = string + edition = string + }) + description = "Consul release version and edition to install from releases.hashicorp.com" + default = { + version = "1.15.1" + edition = "oss" + } +} + +variable "force_unseal" { + type = bool + description = "Always unseal the Vault cluster even if we're not initializing it" + default =
false +} + +variable "initialize_cluster" { + type = bool + description = "Initialize the Vault cluster" + default = true +} + +variable "install_dir" { + type = string + description = "The directory where the Vault binary will be installed" + default = "/opt/vault/bin" +} + +variable "license" { + type = string + sensitive = true + description = "The value of the Vault license" + default = null +} + +variable "local_artifact_path" { + type = string + description = "The path to a locally built Vault artifact to install. It can be a zip archive, RPM, or Debian package" + default = null +} + +variable "manage_service" { + type = bool + description = "Manage the Vault service users and systemd unit. Disable this to use configuration in RPM and Debian packages" + default = true +} + +variable "packages" { + type = list(string) + description = "A list of packages to install via the target host package manager" + default = [] +} + +variable "release" { + type = object({ + version = string + edition = string + }) + description = "Vault release version and edition to install from releases.hashicorp.com" + default = null +} + +variable "root_token" { + type = string + description = "The Vault root token that we can use to initialize and configure the cluster" + default = null +} + +variable "shamir_unseal_keys" { + type = list(string) + description = "Shamir unseal keys. Often only used when adding additional nodes to an already initialized cluster." + default = null +} + +variable "storage_backend" { + type = string + description = "The storage backend to use" + default = "raft" + + validation { + condition = contains(["raft", "consul"], var.storage_backend) + error_message = "The storage_backend must be either raft or consul. No other storage backends are supported." + } +} + +variable "storage_backend_addl_config" { + type = map(any) + description = "An optional set of key value pairs to inject into the storage block" + default = {} +} + +variable "storage_node_prefix" { + type = string + description = "A prefix to use for each node in the Vault storage configuration" + default = "node" +} + +variable "target_hosts" { + description = "The target machines' host addresses to use for the Vault cluster" + type = map(object({ + private_ip = string + public_ip = string + })) +} + +variable "unseal_method" { + type = string + description = "The method by which to unseal the Vault cluster" + default = "awskms" + + validation { + condition = contains(["awskms", "shamir"], var.unseal_method) + error_message = "The unseal_method must be either awskms or shamir. No other unseal methods are supported."
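+    # Note: the key_shares/key_threshold and recovery_shares/recovery_threshold +    # locals in main.tf are keyed off this value, so adding a new unseal method +    # also requires entries there.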
+ } +} diff --git a/go.mod b/go.mod index f3615c02aaae..094da0654c9f 100644 --- a/go.mod +++ b/go.mod @@ -15,9 +15,9 @@ replace github.com/hashicorp/vault/sdk => ./sdk replace go.etcd.io/etcd/client/pkg/v3 v3.5.0 => go.etcd.io/etcd/client/pkg/v3 v3.0.0-20210928084031-3df272774672 require ( - cloud.google.com/go/monitoring v1.8.0 - cloud.google.com/go/spanner v1.41.0 - cloud.google.com/go/storage v1.27.0 + cloud.google.com/go/monitoring v1.12.0 + cloud.google.com/go/spanner v1.44.0 + cloud.google.com/go/storage v1.28.1 github.com/Azure/azure-storage-blob-go v0.14.0 github.com/Azure/go-autorest/autorest v0.11.28 github.com/Azure/go-autorest/autorest/adal v0.9.20 @@ -45,7 +45,7 @@ require ( github.com/docker/go-connections v0.4.0 github.com/duosecurity/duo_api_golang v0.0.0-20190308151101-6c680f768e74 github.com/dustin/go-humanize v1.0.0 - github.com/fatih/color v1.13.0 + github.com/fatih/color v1.14.1 github.com/fatih/structs v1.1.0 github.com/favadi/protoc-go-inject-tag v1.3.0 github.com/ghodss/yaml v1.0.1-0.20190212211648-25d852aebe32 @@ -62,21 +62,21 @@ require ( github.com/google/go-github v17.0.0+incompatible github.com/google/go-metrics-stackdriver v0.2.0 github.com/google/tink/go v1.6.1 - github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 + github.com/hashicorp/cap v0.2.1-0.20230221194157-7894fed1633d github.com/hashicorp/consul-template v0.29.5 github.com/hashicorp/consul/api v1.17.0 github.com/hashicorp/errwrap v1.1.0 - github.com/hashicorp/eventlogger v0.1.0 + github.com/hashicorp/eventlogger v0.1.1 github.com/hashicorp/go-cleanhttp v0.5.2 github.com/hashicorp/go-discover v0.0.0-20210818145131-c573d69da192 github.com/hashicorp/go-gcp-common v0.8.0 - github.com/hashicorp/go-hclog v1.4.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.7 + github.com/hashicorp/go-hclog v1.5.0 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 - github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 github.com/hashicorp/go-memdb v1.3.3 @@ -107,7 +107,7 @@ require ( github.com/hashicorp/hcp-sdk-go v0.23.0 github.com/hashicorp/nomad/api v0.0.0-20220707195938-75f4c2237b28 github.com/hashicorp/raft v1.3.10 - github.com/hashicorp/raft-autopilot v0.2.0 + github.com/hashicorp/raft-autopilot v0.1.6 github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c github.com/hashicorp/raft-snapshot v1.0.4 github.com/hashicorp/vault-plugin-auth-alicloud v0.14.0 @@ -115,13 +115,13 @@ require ( github.com/hashicorp/vault-plugin-auth-centrify v0.14.0 github.com/hashicorp/vault-plugin-auth-cf v0.14.0 github.com/hashicorp/vault-plugin-auth-gcp v0.15.0 - github.com/hashicorp/vault-plugin-auth-jwt v0.15.0 + github.com/hashicorp/vault-plugin-auth-jwt v0.15.2 github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0 github.com/hashicorp/vault-plugin-auth-kubernetes v0.15.0 github.com/hashicorp/vault-plugin-auth-oci v0.13.1 github.com/hashicorp/vault-plugin-database-couchbase v0.9.0 - 
github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.0 - github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 + github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1 + github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0 github.com/hashicorp/vault-plugin-database-redis v0.2.0 github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.0 github.com/hashicorp/vault-plugin-database-snowflake v0.7.0 @@ -132,15 +132,15 @@ require ( github.com/hashicorp/vault-plugin-secrets-gcp v0.15.0 github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0 github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0 - github.com/hashicorp/vault-plugin-secrets-kv v0.14.0 + github.com/hashicorp/vault-plugin-secrets-kv v0.14.2 github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1 - github.com/hashicorp/vault-plugin-secrets-openldap v0.10.0 + github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1 github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0 github.com/hashicorp/vault-testing-stepwise v0.1.3-0.20230203193428-3a789cb2c68f github.com/hashicorp/vault/api v1.9.0 github.com/hashicorp/vault/api/auth/approle v0.1.0 github.com/hashicorp/vault/api/auth/userpass v0.1.0 - github.com/hashicorp/vault/sdk v0.8.0 + github.com/hashicorp/vault/sdk v0.8.1 github.com/hashicorp/vault/vault/hcp_link/proto v0.0.0-20230201201504-b741fa893d77 github.com/influxdata/influxdb1-client v0.0.0-20200827194710-b269163b24ab github.com/jackc/pgx/v4 v4.15.0 @@ -182,7 +182,7 @@ require ( github.com/sasha-s/go-deadlock v0.2.0 github.com/sethvargo/go-limiter v0.7.1 github.com/shirou/gopsutil/v3 v3.22.6 - github.com/stretchr/testify v1.8.1 + github.com/stretchr/testify v1.8.2 go.etcd.io/bbolt v1.3.6 go.etcd.io/etcd/client/pkg/v3 v3.5.0 go.etcd.io/etcd/client/v2 v2.305.0 @@ -194,19 +194,20 @@ require ( go.opentelemetry.io/otel/trace v1.11.2 go.uber.org/atomic v1.9.0 go.uber.org/goleak v1.1.12 - golang.org/x/crypto v0.5.0 - golang.org/x/net v0.5.0 - golang.org/x/oauth2 v0.4.0 + golang.org/x/crypto v0.6.0 + golang.org/x/net v0.8.0 + golang.org/x/oauth2 v0.6.0 golang.org/x/sync v0.1.0 - golang.org/x/sys v0.4.0 - golang.org/x/term v0.4.0 - golang.org/x/tools v0.1.12 - google.golang.org/api v0.109.0 - google.golang.org/grpc v1.51.0 + golang.org/x/sys v0.6.0 + golang.org/x/term v0.6.0 + golang.org/x/tools v0.6.0 + google.golang.org/api v0.114.0 + google.golang.org/grpc v1.53.0 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 - google.golang.org/protobuf v1.28.1 + google.golang.org/protobuf v1.29.1 gopkg.in/ory-am/dockertest.v3 v3.3.4 gopkg.in/square/go-jose.v2 v2.6.0 + gotest.tools/gotestsum v1.9.0 k8s.io/utils v0.0.0-20220728103510-ee6ede2d64ed layeh.com/radius v0.0.0-20190322222518-890bc1058917 mvdan.cc/gofumpt v0.3.1 @@ -214,11 +215,11 @@ require ( ) require ( - cloud.google.com/go v0.105.0 // indirect - cloud.google.com/go/compute v1.14.0 // indirect + cloud.google.com/go v0.110.0 // indirect + cloud.google.com/go/compute v1.18.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect - cloud.google.com/go/iam v0.8.0 // indirect - cloud.google.com/go/kms v1.6.0 // indirect + cloud.google.com/go/iam v0.12.0 // indirect + cloud.google.com/go/kms v1.9.0 // indirect code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f // indirect github.com/Azure/azure-pipeline-go v0.2.3 // indirect github.com/Azure/azure-sdk-for-go v67.2.0+incompatible // indirect @@ -269,20 
+270,20 @@ require ( github.com/boombuler/barcode v1.0.1-0.20190219062509-6c824513bacc // indirect github.com/cenkalti/backoff v2.2.1+incompatible // indirect github.com/cenkalti/backoff/v4 v4.1.3 // indirect - github.com/census-instrumentation/opencensus-proto v0.3.0 // indirect + github.com/census-instrumentation/opencensus-proto v0.4.1 // indirect github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 // indirect - github.com/cespare/xxhash/v2 v2.1.1 // indirect + github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/circonus-labs/circonus-gometrics v2.3.1+incompatible // indirect github.com/circonus-labs/circonusllhist v0.1.3 // indirect github.com/cloudflare/circl v1.1.0 // indirect github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 // indirect - github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 // indirect - github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 // indirect + github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe // indirect + github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b // indirect github.com/containerd/cgroups v1.0.3 // indirect - github.com/containerd/containerd v1.5.13 // indirect + github.com/containerd/containerd v1.5.17 // indirect github.com/containerd/continuity v0.3.0 // indirect github.com/coreos/go-oidc v2.2.1+incompatible // indirect - github.com/coreos/go-oidc/v3 v3.1.0 // indirect + github.com/coreos/go-oidc/v3 v3.5.0 // indirect github.com/coreos/go-semver v0.3.0 // indirect github.com/coreos/go-systemd/v22 v22.3.2 // indirect github.com/couchbase/gocb/v2 v2.3.3 // indirect @@ -292,19 +293,22 @@ require ( github.com/dgryski/go-metro v0.0.0-20180109044635-280f6062b5bc // indirect github.com/digitalocean/godo v1.7.5 // indirect github.com/dimchansky/utfbom v1.1.1 // indirect + github.com/dnephin/pflag v1.0.7 // indirect github.com/docker/cli v20.10.18+incompatible // indirect - github.com/docker/distribution v2.7.1+incompatible // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dsnet/compress v0.0.2-0.20210315054119-f66993602bf5 // indirect github.com/emicklei/go-restful/v3 v3.8.0 // indirect - github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 // indirect - github.com/envoyproxy/protoc-gen-validate v0.1.0 // indirect + github.com/envoyproxy/go-control-plane v0.10.3 // indirect + github.com/envoyproxy/protoc-gen-validate v0.9.1 // indirect github.com/evanphx/json-patch/v5 v5.6.0 // indirect github.com/form3tech-oss/jwt-go v3.2.5+incompatible // indirect + github.com/fsnotify/fsnotify v1.5.4 // indirect github.com/gabriel-vasile/mimetype v1.3.1 // indirect github.com/gammazero/deque v0.0.0-20190130191400-2afb3858e9c7 // indirect github.com/gammazero/workerpool v0.0.0-20190406235159-88d534f22b56 // indirect github.com/go-asn1-ber/asn1-ber v1.5.1 // indirect + github.com/go-jose/go-jose/v3 v3.0.0 // indirect github.com/go-ldap/ldif v0.0.0-20200320164324-fd88d9b715b3 // indirect github.com/go-logr/logr v1.2.3 // indirect github.com/go-logr/stdr v1.2.2 // indirect @@ -334,8 +338,8 @@ require ( github.com/google/gofuzz v1.1.0 // indirect github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 // indirect github.com/google/uuid v1.3.0 // indirect - github.com/googleapis/enterprise-certificate-proxy v0.2.1 // 
indirect - github.com/googleapis/gax-go/v2 v2.7.0 // indirect + github.com/googleapis/enterprise-certificate-proxy v0.2.3 // indirect + github.com/googleapis/gax-go/v2 v2.7.1 // indirect github.com/gophercloud/gophercloud v0.1.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect @@ -382,12 +386,12 @@ require ( github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/mailru/easyjson v0.7.6 // indirect github.com/mattn/go-ieproxy v0.0.1 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mediocregopher/radix/v4 v4.1.1 // indirect github.com/miekg/dns v1.1.41 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect github.com/mitchellh/hashstructure v1.1.0 // indirect - github.com/mitchellh/pointerstructure v1.2.0 // indirect + github.com/mitchellh/pointerstructure v1.2.1 // indirect github.com/moby/sys/mount v0.2.0 // indirect github.com/moby/sys/mountinfo v0.5.0 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect @@ -443,12 +447,12 @@ require ( go.opencensus.io v0.24.0 // indirect go.uber.org/multierr v1.7.0 // indirect go.uber.org/zap v1.19.1 // indirect - golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 // indirect - golang.org/x/text v0.6.0 // indirect + golang.org/x/mod v0.8.0 // indirect + golang.org/x/text v0.8.0 // indirect golang.org/x/time v0.0.0-20220411224347-583f2d630306 // indirect golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef // indirect + google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/ini.v1 v1.66.2 // indirect gopkg.in/jcmturner/goidentity.v3 v3.0.0 // indirect diff --git a/go.sum b/go.sum index e41d9e4fa012..3b2a285e60de 100644 --- a/go.sum +++ b/go.sum @@ -16,40 +16,41 @@ cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKV cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= -cloud.google.com/go v0.105.0 h1:DNtEKRBAAzeS4KyIory52wWHuClNaXJ5x1F7xa4q+5Y= -cloud.google.com/go v0.105.0/go.mod h1:PrLgOJNe5nfE9UMxKxgXj4mD3voiP+YQ6gdt6KMFOKM= +cloud.google.com/go v0.110.0 h1:Zc8gqp3+a9/Eyph2KDmcGaPtbKRIoqq4YTlL4NMD0Ys= +cloud.google.com/go v0.110.0/go.mod h1:SJnCLqQ0FCFGSZMUNUf84MV3Aia54kn7pi8st7tMzaY= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0= -cloud.google.com/go/compute v1.14.0/go.mod 
h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo= +cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY= +cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs= +cloud.google.com/go/compute/metadata v0.2.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= -cloud.google.com/go/iam v0.8.0 h1:E2osAkZzxI/+8pZcxVLcDtAQx/u+hZXVryUaYQ5O0Kk= -cloud.google.com/go/iam v0.8.0/go.mod h1:lga0/y3iH6CX7sYqypWJ33hf7kkfXJag67naqGESjkE= -cloud.google.com/go/kms v1.6.0 h1:OWRZzrPmOZUzurjI2FBGtgY2mB1WaJkqhw6oIwSj0Yg= -cloud.google.com/go/kms v1.6.0/go.mod h1:Jjy850yySiasBUDi6KFUwUv2n1+o7QZFyuUJg6OgjA0= -cloud.google.com/go/longrunning v0.3.0 h1:NjljC+FYPV3uh5/OwWT6pVU+doBqMg2x/rZlE+CamDs= -cloud.google.com/go/monitoring v1.8.0 h1:c9riaGSPQ4dUKWB+M1Fl0N+iLxstMbCktdEwYSPGDvA= -cloud.google.com/go/monitoring v1.8.0/go.mod h1:E7PtoMJ1kQXWxPjB6mv2fhC5/15jInuulFdYYtlcvT4= +cloud.google.com/go/iam v0.12.0 h1:DRtTY29b75ciH6Ov1PHb4/iat2CLCvrOm40Q0a6DFpE= +cloud.google.com/go/iam v0.12.0/go.mod h1:knyHGviacl11zrtZUoDuYpDgLjvr28sLQaG0YB2GYAY= +cloud.google.com/go/kms v1.9.0 h1:b0votJQa/9DSsxgHwN33/tTLA7ZHVzfWhDCrfiXijSo= +cloud.google.com/go/kms v1.9.0/go.mod h1:qb1tPTgfF9RQP8e1wq4cLFErVuTJv7UsSC915J8dh3w= +cloud.google.com/go/longrunning v0.4.1 h1:v+yFJOfKC3yZdY6ZUI933pIYdhyhV8S3NpWrXWmg7jM= +cloud.google.com/go/monitoring v1.12.0 h1:+X79DyOP/Ny23XIqSIb37AvFWSxDN15w/ktklVvPLso= +cloud.google.com/go/monitoring v1.12.0/go.mod h1:yx8Jj2fZNEkL/GYZyTLS4ZtZEZN8WtDEiEqG4kLK50w= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/spanner v1.41.0 h1:NvdTpRwf7DTegbfFdPjAWyD7bOVu0VeMqcvR9aCQCAc= -cloud.google.com/go/spanner v1.41.0/go.mod h1:MLYDBJR/dY4Wt7ZaMIQ7rXOTLjYrmxLE/5ve9vFfWos= +cloud.google.com/go/spanner v1.44.0 h1:fba7k2apz4aI0BE59/kbeaJ78dPOXSz2PSuBIfe7SBM= +cloud.google.com/go/spanner v1.44.0/go.mod h1:G8XIgYdOK+Fbcpbs7p2fiprDw4CaZX63whnSMLVBxjk= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/storage v1.27.0 h1:YOO045NZI9RKfCj1c5A/ZtuuENUc8OAW+gHdGnDgyMQ= -cloud.google.com/go/storage v1.27.0/go.mod h1:x9DOL8TK/ygDUMieqwfhdpQryTeEkhGKMi80i/iqR2s= +cloud.google.com/go/storage v1.28.1 h1:F5QDG5ChchaAVQhINh24U99OWHURqrW8OmQcGKXcbgI= +cloud.google.com/go/storage v1.28.1/go.mod h1:Qnisd4CqDdo6BGs2AD5LLnEsmSQ80wQ5ogcBBKhU86Y= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= 
code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= @@ -307,13 +308,15 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq github.com/cenkalti/backoff/v4 v4.1.3 h1:cFAlzYUlVYDysBEH2T5hyJZMh3+5+WCBvSnK6Q8UtC4= github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0 h1:t/LhUZLVitR1Ow2YOnduCsavhwFUklBMoGVYUCqmCqk= github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/census-instrumentation/opencensus-proto v0.4.1 h1:iKLQ0xPNFxR/2hzXZMrBo8f1j86j5WHzznCCQxV/b8g= +github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw= github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166 h1:jQ93fKqb/wRmK/KiHpa7Tk9rmHeKXhp4j+5Sg/tENiY= github.com/centrify/cloud-golang-sdk v0.0.0-20210923165758-a8c48d049166/go.mod h1:c/gmvyN8lq6lYtHvrqqoXrg2xyN65N0mBmbikxFWXNE= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= -github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= +github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/checkpoint-restore/go-criu/v4 v4.1.0/go.mod h1:xUQBLp4RLc5zJtWY++yjOoMoB5lihDt7fai+75m+rGw= github.com/checkpoint-restore/go-criu/v5 v5.0.0/go.mod h1:cfwC0EG7HMUenopBsUf9d89JlCLQIfgVcNsNN0t6T2M= github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E= @@ -340,14 +343,16 @@ github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306 github.com/cloudfoundry-community/go-cfclient v0.0.0-20210823134051-721f0e559306/go.mod h1:0FdHblxw7g3M2PPICOw9i8YZOHP9dZTHbJUtoxL7Z/E= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4 h1:hzAQntlaYRkVSFEfj9OTWlVV1H155FMD8BTKktLv0QI= github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe h1:QQ3GSy+MqSHxm/d8nCtnAiZdYFd45cYZPs8vOOIYKfk= +github.com/cncf/udpa/go v0.0.0-20220112060539-c52dc94e7fbe/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
-github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1 h1:zH8ljVhhq7yC0MIeUL/IviMtY8hx2mK8cN9wEYb8ggw= github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20220314180256-7f1daf1720fc/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b h1:ACGZRIr7HsgBKHsueQ1yM4WaVaXh21ynwqsF8M8tXhA= +github.com/cncf/xds/go v0.0.0-20230105202645-06c439db220b/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/cockroach-go v0.0.0-20181001143604-e0a95dfd547c h1:2zRrJWIt/f9c9HhNHAgrRgq0San5gRRUJTBXLkchal0= @@ -391,8 +396,8 @@ github.com/containerd/containerd v1.5.0-beta.4/go.mod h1:GmdgZd2zA2GYIBZ0w09Zvgq github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoTJseu1FGOKuoA4nNb2s= github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g= github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c= -github.com/containerd/containerd v1.5.13 h1:XqvKw9i4P7/mFrC3TSM7yV5cwFZ9avXe6M3YANKnzEE= -github.com/containerd/containerd v1.5.13/go.mod h1:3AlCrzKROjIuP3JALsY14n8YtntaUDBu7vek+rPN5Vc= +github.com/containerd/containerd v1.5.17 h1:NLDEI//zhMZpR3DS/AP0qiN+dzYKNAwJaNXCnCmYcgY= +github.com/containerd/containerd v1.5.17/go.mod h1:7IN9MtIzTZH4WPEmD1gNH8bbTQXVX68yd3ZXxSHYCis= github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= github.com/containerd/continuity v0.0.0-20190827140505-75bee3e2ccb6/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y= @@ -456,8 +461,8 @@ github.com/coreos/go-iptables v0.5.0/go.mod h1:/mVI274lEDI2ns62jHCDnCyBF9Iwsmeka github.com/coreos/go-oidc v2.1.0+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= github.com/coreos/go-oidc v2.2.1+incompatible h1:mh48q/BqXqgjVHpy2ZY7WnWAbenxRjsz9N1i1YxjHAk= github.com/coreos/go-oidc v2.2.1+incompatible/go.mod h1:CgnwVTmzoESiwO9qyAFEMiHoZ1nMCKZlZ9V6mm3/LKc= -github.com/coreos/go-oidc/v3 v3.1.0 h1:6avEvcdvTa1qYsOZ6I5PRkSYHzpTNWgKYmaJfaYbrRw= -github.com/coreos/go-oidc/v3 v3.1.0/go.mod h1:rEJ/idjfUyfkBit1eI1fvyr+64/g9dcKpAm8MJMesvo= +github.com/coreos/go-oidc/v3 v3.5.0 h1:VxKtbccHZxs8juq7RdJntSqtXFtde9YpNpGn0yqgEHw= +github.com/coreos/go-oidc/v3 v3.5.0/go.mod h1:ecXRtV4romGPeO6ieExAsUK9cb/3fp9hXNz1tlv8PIM= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0 h1:wkHLiw0WNATZnSG7epLsujiMCgPAc9xhjJ4tgnAxmfM= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= @@ -512,6 +517,8 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/ github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/dnaeon/go-vcr v1.2.0 h1:zHCHvJYTMh1N7xnV7zf1m1GPBF9Ad0Jk/whtQ1663qI= github.com/dnaeon/go-vcr v1.2.0/go.mod h1:R4UdLID7HZT3taECzJs4YgbbH6PIGXB6W/sc5OLb6RQ= +github.com/dnephin/pflag v1.0.7 h1:oxONGlWxhmUct0YzKTgrpQv9AUA1wtPBn7zuSjJqptk= 
+github.com/dnephin/pflag v1.0.7/go.mod h1:uxE91IoWURlOiTUIA8Mq5ZZkAv3dPUfZNaT80Zm7OQE= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.8+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/cli v20.10.9+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= @@ -519,8 +526,9 @@ github.com/docker/cli v20.10.18+incompatible h1:f/GQLsVpo10VvToRay2IraVA1wHz9Kkt github.com/docker/cli v20.10.18+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= -github.com/docker/distribution v2.7.1+incompatible h1:a5mlkVzth6W5A4fOsS3D2EO5BUmsJpcB+cRlLU7cSug= github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v1.4.2-0.20200319182547-c7ad2b866182/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/docker v20.10.7+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= @@ -562,10 +570,12 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1 h1:xvqufLtNVwAhN8NMyWklVgxnWohi+wtMGQMhtxexlm0= -github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE= -github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= +github.com/envoyproxy/go-control-plane v0.10.3 h1:xdCVXxEe0Y3FQith+0cj2irwZudqGYvecuLB1HtdexY= +github.com/envoyproxy/go-control-plane v0.10.3/go.mod h1:fJJn/j26vwOu972OllsvAgJJM//w9BV6Fxbg2LuVd34= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/envoyproxy/protoc-gen-validate v0.6.7/go.mod h1:dyJXwwfPK2VSqiB9Klm1J6romD608Ba7Hij42vrOBCo= +github.com/envoyproxy/protoc-gen-validate v0.9.1 h1:PS7VIOgmSVhWUEeZwTe7z7zouA22Cr590PzXKbZHOVY= +github.com/envoyproxy/protoc-gen-validate v0.9.1/go.mod h1:OKNgG7TCp5pF4d6XftA0++PMirau2/yoOwVac3AbF2w= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/evanphx/json-patch/v5 v5.5.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= @@ -573,8 +583,9 @@ github.com/evanphx/json-patch/v5 v5.6.0 
h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJ github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.13.0 h1:8LOYc1KYPPmyKMuN8QV2DNRWNbLo6LZ0iLs8+mlH53w= github.com/fatih/color v1.13.0/go.mod h1:kLAiJbzzSOZDVNGyDpeOxJ47H46qBXwg5ILebYFFOfk= +github.com/fatih/color v1.14.1 h1:qfhVLaG5s+nCROl1zJsZRxFeYrHLqWroPOQ8BWiNb4w= +github.com/fatih/color v1.14.1/go.mod h1:2oHN61fhTpgcxD3TSWCgKDiH1+x4OiDVVGH8WlgGZGg= github.com/fatih/structs v1.1.0 h1:Q7juDM0QtcnhCpeyLGQKyg4TOIghuNXrkL32pHAUMxo= github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M= github.com/favadi/protoc-go-inject-tag v1.3.0 h1:JPrmsmc/uBShG85uY5xGZIa5WJ0IaNZn6LZhQR9tIQE= @@ -588,8 +599,9 @@ github.com/frankban/quicktest v1.13.0/go.mod h1:qLE0fzW0VuyUAJgPU19zByoIr0HtCHN/ github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.5.1 h1:mZcQUHVQUQWoPXXtuf9yuEXKudkV2sx1E06UadKWpgI= github.com/fsnotify/fsnotify v1.5.1/go.mod h1:T3375wBYaZdLLcVNkcVbzGHY7f1l/uK5T5Ai1i3InKU= +github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwVZI= +github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= github.com/gabriel-vasile/mimetype v1.3.1 h1:qevA6c2MtE1RorlScnixeG0VA1H4xrXyhyX3oWBynNQ= github.com/gabriel-vasile/mimetype v1.3.1/go.mod h1:fA8fi6KUiG7MgQQ+mEWotXoEOvmxRtOJlERCzSmRvr8= @@ -618,6 +630,8 @@ github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9 github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-jose/go-jose/v3 v3.0.0 h1:s6rrhirfEP/CGIoc6p+PZAeogN2SxKav6Wp7+dyMWVo= +github.com/go-jose/go-jose/v3 v3.0.0/go.mod h1:RNkWWRld676jZEYoV3+XK8L2ZnNSvIsxFMht0mSX+u8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= @@ -819,6 +833,7 @@ github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2V github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -897,7 +912,7 @@ github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/ github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1 h1:d8MncMlErDFTwQGBK1xhv026j9kqhvw1Qv9IbWT1VLQ= +github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -916,12 +931,12 @@ github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/googleapis/enterprise-certificate-proxy v0.2.1 h1:RY7tHKZcRlk788d5WSo/e83gOyyy742E8GSs771ySpg= -github.com/googleapis/enterprise-certificate-proxy v0.2.1/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3 h1:yk9/cqRKtT9wXZSsRH9aurXEpJX+U6FLtpYTdC3R06k= +github.com/googleapis/enterprise-certificate-proxy v0.2.3/go.mod h1:AwSRAtLfXpU5Nm3pW+v7rGDHp09LsPtGY9MduiEsR9k= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.7.0 h1:IcsPKeInNvYi7eqSaDjiZqDDKu5rsmunY0Y1YupQSSQ= -github.com/googleapis/gax-go/v2 v2.7.0/go.mod h1:TEop28CZZQ2y+c0VxMUmu1lV+fQx57QpBWsYpwqHJx8= +github.com/googleapis/gax-go/v2 v2.7.1 h1:gF4c0zjUP2H/s/hEGyLA3I0fA2ZWjzYiONAD6cvPr8A= +github.com/googleapis/gax-go/v2 v2.7.1/go.mod h1:4orTrqY6hXxxaUL4LHIPl6lGo8vAE38/qKbhSAKP6QI= github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= github.com/googleapis/gnostic v0.2.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= @@ -951,10 +966,11 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgf github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8= github.com/hailocab/go-hostpool 
v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220 h1:Vgv3jG0kicczshK+lOHWJ9OososZjnjSu1YslqofFYY= -github.com/hashicorp/cap v0.2.1-0.20220727210936-60cd1534e220/go.mod h1:zb3VvIFA0lM2lbmO69NjowV9dJzJnZS89TaM9blXPJA= +github.com/hashicorp/cap v0.2.1-0.20230221194157-7894fed1633d h1:29noMC2UssBX3F/BUmk0/j4PRUU4QvPTfyeOn3tmcOA= +github.com/hashicorp/cap v0.2.1-0.20230221194157-7894fed1633d/go.mod h1:dHTmyMIVbzT981XxRoci5G//dfWmd/HhuNiCH6J5+IA= github.com/hashicorp/consul-template v0.29.5 h1:tzEo93RqODAX2cgOe/ke8xcpdPdxg5rxl6d22wE3f6c= github.com/hashicorp/consul-template v0.29.5/go.mod h1:SZGBPz/t0JaBwMOqM6q/mG66cBRA8IeDUjOwjO0Pa5M= github.com/hashicorp/consul/api v1.17.0 h1:aqytbw31uCPNn37ST+717IyGod+P1eTgSGu3yjRo4bs= @@ -968,8 +984,8 @@ github.com/hashicorp/errwrap v0.0.0-20141028054710-7554cd9344ce/go.mod h1:YH+1FK github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I= github.com/hashicorp/errwrap v1.1.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/eventlogger v0.1.0 h1:S6xc4gZVzewuDUP4R4Ngko419h/CGDuV/b4ADL3XLik= -github.com/hashicorp/eventlogger v0.1.0/go.mod h1:a3IXf1aEJfpCPzseTOrwKj4fVW/Qn3oEmpQeaIznzH0= +github.com/hashicorp/eventlogger v0.1.1 h1:zyCjxsy7KunFsMPZKU5PnwWEakSrp1zjj2vPFmrDaeo= +github.com/hashicorp/eventlogger v0.1.1/go.mod h1://CHt6/j+Q2lc0NlUB5af4aS2M0c0aVBg9/JfcpAyhM= github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= @@ -986,16 +1002,16 @@ github.com/hashicorp/go-hclog v0.14.1/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39 github.com/hashicorp/go-hclog v0.16.2/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.0.0/go.mod h1:whpDNt7SSdeAju8AWKIWsul05p54N/39EeqMAyrmvFQ= github.com/hashicorp/go-hclog v1.3.1/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-hclog v1.4.0 h1:ctuWFGrhFha8BnnzxqeRGidlEcQkDyL5u8J8t5eA11I= -github.com/hashicorp/go-hclog v1.4.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= +github.com/hashicorp/go-hclog v1.5.0 h1:bI2ocEMgcVlz55Oj1xZNBsVi900c7II+fWDyV9o+13c= +github.com/hashicorp/go-hclog v1.5.0/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJFeZnpfm2KLowc= github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.7 h1:P+dh3M6k5aNl2wXrA9s6zquMHWPaYIkotCffiMIYt6U= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.7/go.mod h1:sDQAfwJGv25uGPZA04x87ERglCG6avnRcBT9wYoMII8= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 
h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1 h1:ZV26VJYcITBom0QqYSUOIj4HOHCVPEFjLqjxyXV/AbA= github.com/hashicorp/go-kms-wrapping/wrappers/aead/v2 v2.0.7-1/go.mod h1:b99cDSA+OzcyRoBZroSf174/ss/e6gUuS45wue9ZQfc= github.com/hashicorp/go-kms-wrapping/wrappers/alicloudkms/v2 v2.0.1 h1:ydUCtmr8f9F+mHZ1iCsvzqFTXqNVpewX3s9zcYipMKI= @@ -1004,8 +1020,8 @@ github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7 h1:E3eEWpkofgPNrY github.com/hashicorp/go-kms-wrapping/wrappers/awskms/v2 v2.0.7/go.mod h1:j5vefRoguQUG7iM4reS/hKIZssU1lZRqNPM5Wow6UnM= github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7 h1:X27JWuPW6Gmi2l7NMm0pvnp7z7hhtns2TeIOQU93mqI= github.com/hashicorp/go-kms-wrapping/wrappers/azurekeyvault/v2 v2.0.7/go.mod h1:i7Dt9mDsVUQG/I639jtdQerliaO2SvvPnpYPhZ8CGZ4= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.7 h1:GXp8P2xb8SE6X/Iw+22nw6fkbkb9LPQlKC8NPOutXN8= -github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.7/go.mod h1:GTK8CQ239rq7u3XNw4Mooqb7hFZzewtwgoJONAXGcRE= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8 h1:16I8OqBEuxZIowwn3jiLvhlx+z+ia4dJc9stvz0yUBU= +github.com/hashicorp/go-kms-wrapping/wrappers/gcpckms/v2 v2.0.8/go.mod h1:6QUMo5BrXAtbzSuZilqmx0A4px2u6PeFK7vfp2WIzeM= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7 h1:KeG3QGrbxbr2qAqCJdf3NR4ijAYwdcWLTmwSbR0yusM= github.com/hashicorp/go-kms-wrapping/wrappers/ocikms/v2 v2.0.7/go.mod h1:rXxYzjjGw4HltEwxPp9zYSRIo6R+rBf1MSPk01bvodc= github.com/hashicorp/go-kms-wrapping/wrappers/transit/v2 v2.0.7 h1:G25tZFw/LrAzJWxvS0/BFI7V1xAP/UsAIsgBwiE0mwo= @@ -1113,8 +1129,8 @@ github.com/hashicorp/raft v1.1.2-0.20191002163536-9c6bd3e3eb17/go.mod h1:vPAJM8A github.com/hashicorp/raft v1.2.0/go.mod h1:vPAJM8Asw6u8LxC3eJCUZmRP/E4QmUGE1R7g7k8sG/8= github.com/hashicorp/raft v1.3.10 h1:LR5QZX1VQd0DFWZfeCwWawyeKfpS/Tm1yjnJIY5X4Tw= github.com/hashicorp/raft v1.3.10/go.mod h1:J8naEwc6XaaCfts7+28whSeRvCqTd6e20BlCU3LtEO4= -github.com/hashicorp/raft-autopilot v0.2.0 h1:2/R2RPgamgRKgNWGQioULZvjeKXQZmDuw5Ty+6c+H7Y= -github.com/hashicorp/raft-autopilot v0.2.0/go.mod h1:q6tZ8UAZ5xio2gv2JvjgmtOlh80M6ic8xQYBe2Egkg8= +github.com/hashicorp/raft-autopilot v0.1.6 h1:C1q3RNF2FfXNZfHWbvVAu0QixaQK8K5pX4O5lh+9z4I= +github.com/hashicorp/raft-autopilot v0.1.6/go.mod h1:Af4jZBwaNOI+tXfIqIdbcAnh/UyyqIMj/pOISIfhArw= github.com/hashicorp/raft-boltdb v0.0.0-20171010151810-6e5ba93211ea/go.mod h1:pNv7Wc3ycL6F5oOWn+tPGo2gWD4a5X+yp/ntwdKLjRk= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c h1:oiKun9QlrOz5yQxMZJ3tf1kWtFYuKSJzxzEDxDPevj4= github.com/hashicorp/raft-boltdb/v2 v2.0.0-20210421194847-a7e34179d62c/go.mod h1:kiPs9g148eLShc2TYagUAyKDnD+dH9U+CQKsXzlY9xo= @@ -1132,8 +1148,8 @@ github.com/hashicorp/vault-plugin-auth-cf v0.14.0 h1:n/ojZukcH8YAOy/7JXITJn21byr github.com/hashicorp/vault-plugin-auth-cf v0.14.0/go.mod h1:BdvPbWtUuBhTW1HrYXj2OGoeAIzWENYsKF378RoKmw4= github.com/hashicorp/vault-plugin-auth-gcp v0.15.0 h1:EmfbQkYufMSFcbnOyn0f7bv2QYyyQyMx/D+qO04jfr0= github.com/hashicorp/vault-plugin-auth-gcp v0.15.0/go.mod h1:GvtgteMxgza9I/QXNKFOAW6/FX0FmsAOzE0nz5126H4= -github.com/hashicorp/vault-plugin-auth-jwt v0.15.0 h1:GGS/64MmoobWZFA07nYEPan9NLw2NhqRrmVLra7JHNM= -github.com/hashicorp/vault-plugin-auth-jwt v0.15.0/go.mod 
h1:c6UQCaBpR11jB52xzcIjiV/9RY+v+bZw1TY78ylf5ds= +github.com/hashicorp/vault-plugin-auth-jwt v0.15.2 h1:8Pa9ir5lNTBfO9KT5jrqOeRDhgBsWuWtZ79vv4/AIy4= +github.com/hashicorp/vault-plugin-auth-jwt v0.15.2/go.mod h1:cMm0kZEcMkvwMSfHXeM1obYnjkmeoHOq0dWizzDCxDQ= github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0 h1:gdbrEwpPICDt8xQ7C595M+DXaojHvkA9/AhCKbvE+jY= github.com/hashicorp/vault-plugin-auth-kerberos v0.9.0/go.mod h1:dyGS9eHADGMJC42tTr+XliO2Ntssv4bUOK1Je9IEMMo= github.com/hashicorp/vault-plugin-auth-kubernetes v0.15.0 h1:uHsn1fJqxGxbWiiD2resMYZzPJWPwPMCGNCEziGHfwE= @@ -1142,10 +1158,10 @@ github.com/hashicorp/vault-plugin-auth-oci v0.13.1 h1:xThaZC9jzZoqqccfxTk11hfwgq github.com/hashicorp/vault-plugin-auth-oci v0.13.1/go.mod h1:O426Kf4nUXfwq+o0HqQuqpZygm6SiOY6eEXyjrZweYA= github.com/hashicorp/vault-plugin-database-couchbase v0.9.0 h1:hJOHJ9yZ9kt1/DuRaU5Sa339j3/QcPL4esT9JLQonYA= github.com/hashicorp/vault-plugin-database-couchbase v0.9.0/go.mod h1:skmG6MgIG6fjIOlOEgVKOcNlr1PcgHPUb9q1YQ5+Q9k= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.0 h1:NwcbzQB529WtB/m7tZKxKiB6pQc0IyD3L80tk3mtBl8= -github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.0/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0 h1:wx/9Dh9YGGU7GiijwRfwPFBlWdmBEdf6n2VhgTdRtJU= -github.com/hashicorp/vault-plugin-database-mongodbatlas v0.8.0/go.mod h1:eWwd1Ba7aLU1tIAtmFsEhu9E023jkkypHawxhnAbZfc= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1 h1:nVO6F8V69E2fAQklh/Ds+EypVMutN4iIlt3sat9qW9M= +github.com/hashicorp/vault-plugin-database-elasticsearch v0.13.1/go.mod h1:wO8EPQs5bsBERD6MSQ+7Az+YJ4TFclCNxBo3r3VKeao= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0 h1:wlWrg1z5Pyx+FTUCOzA9yh0FTI+pfA9tMrsFPFBcjjA= +github.com/hashicorp/vault-plugin-database-mongodbatlas v0.9.0/go.mod h1:4Ew6RNnA1NXtpLV0ijkwpE6pJE46G+suDKnTVMm+kXA= github.com/hashicorp/vault-plugin-database-redis v0.2.0 h1:Fg1inevnDhj58+/y5SY1CihLftytG1D+3QqbUJbHYUM= github.com/hashicorp/vault-plugin-database-redis v0.2.0/go.mod h1:hPj1vvjzsJ+g9PChP7iKqEJX7ttr03oz/RDEYsq8zZY= github.com/hashicorp/vault-plugin-database-redis-elasticache v0.2.0 h1:dgTT7E8xj56hjktMxHNAgFpy7pchpoQ20cIhDsBcgz8= @@ -1166,12 +1182,12 @@ github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0 h1:eUC5ltK+1bkc+SVMzAUq github.com/hashicorp/vault-plugin-secrets-gcpkms v0.14.0/go.mod h1:86YCY86XuiQesV1jfjnV4icgoaxQdoUHONzDru+XQHA= github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0 h1:Joz9SBwjpEOGu+Ynv60JC3fAA4UuLJzu7NcrKm6wMMs= github.com/hashicorp/vault-plugin-secrets-kubernetes v0.3.0/go.mod h1:NJeYBRgLVqjvkrVyZEe42oaqP3+xvVNMYdJoMWVoByU= -github.com/hashicorp/vault-plugin-secrets-kv v0.14.0 h1:PbveQUraOp9Bj7SVvFfssnmNYvlNTSHC6d/eLS+Am0c= -github.com/hashicorp/vault-plugin-secrets-kv v0.14.0/go.mod h1:YLsIcn9enkcyTqtuxmCXZ94nr2aeJCZhC+neHacX8SQ= +github.com/hashicorp/vault-plugin-secrets-kv v0.14.2 h1:13p50RIltQM/JH32uWZe9sAp16Uaj0zCLmVGPvS09qo= +github.com/hashicorp/vault-plugin-secrets-kv v0.14.2/go.mod h1:cAxt2o3BjRT5CbNLtgXuxTReaejvrgN/qk+no+DnwJ8= github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1 h1:WkW8fyHxEdz1wGSTxCnSCrzXvgLXqXr8Iqp7upa/s4E= github.com/hashicorp/vault-plugin-secrets-mongodbatlas v0.9.1/go.mod h1:p96IECNtVwpvTq8RAw3dLlAYRWpG1n06XOoo0TkJnuk= -github.com/hashicorp/vault-plugin-secrets-openldap v0.10.0 
h1:Q3nKBbHQ6E/kOa3amKvcbhYTbkz4U25BBTwH66LnF+0= -github.com/hashicorp/vault-plugin-secrets-openldap v0.10.0/go.mod h1:sYuxnuNY2O59fy+LACtvgrqUO/r0cnhAYTMqLajD9FE= +github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1 h1:EN3/iEjPPmcpX9yihybQNHvewc+YoJw7aoKsio1WK5s= +github.com/hashicorp/vault-plugin-secrets-openldap v0.10.1/go.mod h1:sYuxnuNY2O59fy+LACtvgrqUO/r0cnhAYTMqLajD9FE= github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0 h1:jgJpVKhV0Eh6EjpUEIf7VYH2D6D0xW2Lry9/3PI8hy0= github.com/hashicorp/vault-plugin-secrets-terraform v0.7.0/go.mod h1:GzYAJYytgbNNyT3S7rspz1cLE53E1oajFbEtaDUlVGU= github.com/hashicorp/vault-testing-stepwise v0.1.1/go.mod h1:3vUYn6D0ZadvstNO3YQQlIcp7u1a19MdoOC0NQ0yaOE= @@ -1188,6 +1204,7 @@ github.com/hashicorp/yamux v0.0.0-20211028200310-0bc27b27de87/go.mod h1:CtWFDAQg github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/huandu/xstrings v1.3.2 h1:L18LIDzqlW6xN2rEkpdV8+oL/IXWJ1APd+vsdYy4Wdw= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= @@ -1333,6 +1350,7 @@ github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQ github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -1363,6 +1381,7 @@ github.com/linode/linodego v0.7.1/go.mod h1:ga11n3ivecUrPCHN0rANxKmfWBJVkOXfLMZi github.com/linuxkit/virtsock v0.0.0-20201010232012-f8cee7dfc7a3/go.mod h1:3r6x7q95whyfWQpmGZTu3gk3v2YkMi05HEzl7Tf7YEo= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/lyft/protoc-gen-star v0.6.0/go.mod h1:TGAoBVkt8w7MPG72TrKIu85MIdXwDuzJYeZuUPFPNwA= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= @@ -1405,8 +1424,9 @@ github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzp github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= 
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY= github.com/mediocregopher/radix/v4 v4.1.1 h1:JkZBEp0y8pWGNZkmO3RR5oEO5huwd4zKKt4rh1C+P8s= github.com/mediocregopher/radix/v4 v4.1.1/go.mod h1:ajchozX/6ELmydxWeWM6xCFHVpZ4+67LXHOTOVR0nCE= @@ -1450,8 +1470,8 @@ github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RR github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A= -github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A= -github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= +github.com/mitchellh/pointerstructure v1.2.1 h1:ZhBBeX8tSlRpu/FFhXH4RC4OJzFlqsQhoHZAz4x7TIw= +github.com/mitchellh/pointerstructure v1.2.1/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= @@ -1613,6 +1633,7 @@ github.com/pkg/errors v0.8.1-0.20171018195549-f15c970de5b7/go.mod h1:bwawxfHBFNV github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -1741,6 +1762,8 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= github.com/spf13/afero v1.2.1/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/afero v1.3.3/go.mod h1:5KUK8ByomD5Ti5Artl0RtHeI5pTF7MIDuXL3yY520V4= +github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.2-0.20171109065643-2da4a54c5cee/go.mod 
h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= @@ -1777,8 +1800,9 @@ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.2/go.mod h1:R6va5+xMeoiuVRoj+gSkQ7d3FALtqAAGI1FQKckRals= github.com/stretchr/testify v1.7.5/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= -github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= +github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/syndtr/gocapability v0.0.0-20170704070218-db04d3cc01c8/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20180916011248-d98352740cb2/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww= @@ -1847,7 +1871,6 @@ github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8 h1:nIPpBwaJSVYIxUFsDv3M8ofm github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= -github.com/yhat/scrape v0.0.0-20161128144610-24b7890b0945/go.mod h1:4vRFPPNYllgCacoj+0FoKOjTW68rUhEfqPLiEJaK2w8= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d h1:splanxYIlg+5LfHAM6xpdFEAYOk8iySO56hMFq6uLyA= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= @@ -1855,6 +1878,7 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/gopher-lua v0.0.0-20200816102855-ee81675732da/go.mod h1:E1AXubJBdNmFERAOucpDIxNzeGfLzg0mYh+UfMWdChA= github.com/yuin/gopher-lua v0.0.0-20210529063254-f4c35e4016d9 h1:k/gmLsJDWwWqbLCur2yWnJzwQEKRcAHXo6seXGuSwWw= @@ -1906,6 +1930,7 @@ go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rq go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0= go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA= go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= +go.opentelemetry.io/proto/otlp v0.15.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod 
h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1948,6 +1973,7 @@ golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190617133340-57b3e21c3d56/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190923035154-9ee001bba392/go.mod h1:/lpIB1dKB+9EgE3H3cr1v9wB50oz8l4C4h62xy7jSTY= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= @@ -1967,8 +1993,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y golang.org/x/crypto v0.0.0-20211215153901-e495a2d5b3d3/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220313003712-b769efc7c000/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -2003,8 +2029,10 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4 h1:6zppjxzCulZykYSLyVDYbneBfbaBIQPYMevg0bEwv2s= +golang.org/x/mod v0.5.0/go.mod h1:5OXOZSfqPIIbmVBIIKWRFfZjPR0E5r58TLhUjH0a2Ro= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= +golang.org/x/mod v0.8.0 h1:LUYupSeNrTNCGzR/hVBk2NHZO4hXcVaW1k4Qx7rjPx8= +golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180530234432-1e491301e022/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -2043,7 +2071,6 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net 
v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= -golang.org/x/net v0.0.0-20200505041828-1ed23360d12c/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -2069,13 +2096,19 @@ golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210825183410-e898025ed96a/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211216030914-fe4d6282115f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.0.0-20220725212005-46097bf591d3/go.mod h1:AaygXjzTFtRAg2ttMY5RMuhpJ3cNnI0XpyFJD1iQRSM= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= +golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= +golang.org/x/net v0.4.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE= golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= +golang.org/x/net v0.8.0 h1:Zrh2ngAOFYneWTAIAPethzeaQLuHwhuBkuV6ZiRnUaQ= +golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2084,8 +2117,9 @@ golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M= -golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec= +golang.org/x/oauth2 v0.3.0/go.mod h1:rQrIauxkUhJ6CuwEXwymO2/eh4xz2ZWF1nBkcxS+tGk= +golang.org/x/oauth2 v0.6.0 h1:Lh8GPgSKBfWSwFvtuWOfeI3aAAnbXTSutYxJiOJFgIw= +golang.org/x/oauth2 v0.6.0/go.mod h1:ycmewcwgD4Rpr3eZJLSB4Kyyljb3qDh40vJ8STE5HKw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2098,6 +2132,7 @@ golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20220601150217-0de741cfad7f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.1.0 h1:wsuoTGHzEhffawBOhz5CYhcrV4IdKZbEyZjBMuTp12o= golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2207,14 +2242,15 @@ golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210818153620-00dd8d7831e7/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210906170528-6f6e22806c34/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210927094055-39ccf1dd6fa6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211007075335-d3039528d8ac/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211031064116-611d5d643895/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211116061358-0a5406a5449c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -2224,15 +2260,22 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= +golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0 
h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.0.0-20220526004731-065cf7ba2467/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= +golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA= golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= +golang.org/x/term v0.6.0 h1:clScbb1cHjoCkyRbWwBEUZ5H/tIFu5TAXIqaZD0Gcjw= +golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -2245,8 +2288,11 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= +golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= +golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -2324,12 +2370,15 @@ golang.org/x/tools v0.0.0-20200916195026-c9a70fc28ce3/go.mod h1:z6u4i615ZeAfBE4X golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210101214203-2dba1e4ea05c/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.12 
h1:VveCTK38A2rkS8ZqFY25HIDFscX5X9OoEhJd3quQmXU= +golang.org/x/tools v0.1.11/go.mod h1:SgwaegtQh8clINPpECJMqnxLv9I09HLqnW3RMqW0CA4= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= +golang.org/x/tools v0.6.0 h1:BOw41kyTf3PuCW1pVQf8+Cyg8pMlkYB1oo9iJ6D/lKM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -2357,8 +2406,8 @@ google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0M google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.32.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= -google.golang.org/api v0.109.0 h1:sW9hgHyX497PP5//NUM7nqfV8D0iDfBApqq7sOh1XR8= -google.golang.org/api v0.109.0/go.mod h1:2Ts0XTHNVWxypznxWOYUeI4g3WdP9Pk2Qk58+a/O9MY= +google.golang.org/api v0.114.0 h1:1xQPji6cO2E2vLiI+C/XiFAnsn1WV3mjaEwGLhi3grE= +google.golang.org/api v0.114.0/go.mod h1:ifYI2ZsFK6/uGddGfAD5BMxlnkBqCmqHSDUVi45N5Yg= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -2408,8 +2457,10 @@ google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= google.golang.org/genproto v0.0.0-20210630183607-d20f26d13c79/go.mod h1:yiaVoXHpRzHGyxV3o4DktVWY4mSUErTKaeEOq6C3t3U= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef h1:uQ2vjV/sHTsWSqdKeLqmwitzgvjMl7o4IdtHwUDXSJY= -google.golang.org/genproto v0.0.0-20221227171554-f9683d7f8bef/go.mod h1:RGgjbofJ8xD9Sq1VVhDM1Vok1vRONV+rg+CjzG4SZKM= +google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= +google.golang.org/genproto v0.0.0-20220329172620-7be39ac1afc7/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA= +google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s= google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= @@ -2436,8 +2487,10 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc v1.41.0/go.mod h1:U3l9uK9J0sini8mHphKoXyaqDA/8VyGnDee1zzIUK6k= -google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U= 
-google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww= +google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= +google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ= +google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc= +google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0 h1:M1YKkFIboKNieVO5DLUEVzQfGwJD30Nv2jfUgzb5UcE= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -2453,8 +2506,10 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= +google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.29.1 h1:7QBf+IK2gx70Ap/hDsOmam3GE0v9HicjfEdAxE62UoM= +google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -2508,9 +2563,12 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= +gotest.tools/gotestsum v1.9.0 h1:Jbo/0k/sIOXIJu51IZxEAt27n77xspFEfL6SqKUR72A= +gotest.tools/gotestsum v1.9.0/go.mod h1:6JHCiN6TEjA7Kaz23q1bH0e2Dc3YJjDUZ0DmctFZf+w= gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk= gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8= -gotest.tools/v3 v3.2.0 h1:I0DwBVMGAx26dttAj1BtJLAkVGncrkkUXfJLC4Flt/I= +gotest.tools/v3 v3.3.0 h1:MfDY1b1/0xN1CyMlQDac0ziEy9zJQd9CXBRRDHw2jJo= +gotest.tools/v3 v3.3.0/go.mod h1:Mcr9QNxkg0uMvy/YElmo4SpXgJKWgQvYrT7Kw5RzJ1A= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/helper/builtinplugins/registry.go b/helper/builtinplugins/registry.go index 24539af5cfe7..8a24a58c34ad 100644 --- a/helper/builtinplugins/registry.go +++ b/helper/builtinplugins/registry.go @@ -146,7 +146,10 @@ func newRegistry() *registry { "snowflake-database-plugin": {Factory: dbSnowflake.New}, }, logicalBackends: map[string]logicalBackend{ - "ad": {Factory: logicalAd.Factory}, + "ad": { + Factory: logicalAd.Factory, + DeprecationStatus: 
consts.Deprecated, + }, "alicloud": {Factory: logicalAlicloud.Factory}, "aws": {Factory: logicalAws.Factory}, "azure": {Factory: logicalAzure.Factory}, diff --git a/helper/namespace/namespace.go b/helper/namespace/namespace.go index 93d68622dec5..c1226a5547f9 100644 --- a/helper/namespace/namespace.go +++ b/helper/namespace/namespace.go @@ -61,11 +61,8 @@ func RootContext(ctx context.Context) context.Context { return ContextWithNamespace(ctx, RootNamespace) } -// This function caches the ns to avoid doing a .Value lookup over and over, -// because it's called a *lot* in the request critical path. .Value is -// concurrency-safe so uses some kind of locking/atomicity, but it should never -// be read before first write, plus we don't believe this will be called from -// different goroutines, so it should be safe. +// FromContext retrieves the namespace from a context, or returns an error +// if there is no namespace in the context. func FromContext(ctx context.Context) (*Namespace, error) { if ctx == nil { return nil, errors.New("context was nil") diff --git a/helper/osutil/fileinfo.go b/helper/osutil/fileinfo.go index 4b6ba7910f50..59b99859ebae 100644 --- a/helper/osutil/fileinfo.go +++ b/helper/osutil/fileinfo.go @@ -64,3 +64,17 @@ func OwnerPermissionsMatch(path string, uid int, permissions int) error { return nil } + +// OwnerPermissionsMatchFile checks if the vault user is the owner and the permissions are secure for the input file +func OwnerPermissionsMatchFile(file *os.File, uid int, permissions int) error { + info, err := file.Stat() + if err != nil { + return fmt.Errorf("file stat error on path %q: %w", file.Name(), err) + } + err = checkPathInfo(info, file.Name(), uid, permissions) + if err != nil { + return err + } + + return nil +} diff --git a/helper/osutil/fileinfo_test.go b/helper/osutil/fileinfo_test.go index 0c77d4873ed1..febd11966a35 100644 --- a/helper/osutil/fileinfo_test.go +++ b/helper/osutil/fileinfo_test.go @@ -4,6 +4,7 @@ import ( "io/fs" "os" "os/user" + "path/filepath" "runtime" "strconv" "testing" @@ -82,3 +83,98 @@ func TestCheckPathInfo(t *testing.T) { } } } + +// TestOwnerPermissionsMatchFile creates a file and verifies that the current user of the process is the owner of the +// file +func TestOwnerPermissionsMatchFile(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} + +// TestOwnerPermissionsMatchFile_OtherUser creates a file using the user that started the current process and verifies +// that a different user is not the owner of the file +func TestOwnerPermissionsMatchFile_OtherUser(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + info, err := os.Stat(path) + if err != nil { + t.Fatal("failed to stat test file", err) + } + + if err := OwnerPermissionsMatchFile(f, int(uid)+1, int(info.Mode())); err == nil { + t.Fatalf("expected error but got none") + } +} + +// TestOwnerPermissionsMatchFile_Symlink creates a file and a symlink to that file. The test verifies that the current +// user of the process is the owner of the file +func TestOwnerPermissionsMatchFile_Symlink(t *testing.T) { + currentUser, err := user.Current() + if err != nil { + t.Fatal("failed to get current user", err) + } + uid, err := strconv.ParseInt(currentUser.Uid, 0, 64) + if err != nil { + t.Fatal("failed to convert uid", err) + } + dir := t.TempDir() + path := filepath.Join(dir, "foo") + f, err := os.Create(path) + if err != nil { + t.Fatal("failed to create test file", err) + } + defer f.Close() + + symlink := filepath.Join(dir, "symlink") + err = os.Symlink(path, symlink) + if err != nil { + t.Fatal("failed to symlink file", err) + } + symlinkedFile, err := os.Open(symlink) + if err != nil { + t.Fatal("failed to open file", err) + } + info, err := os.Stat(symlink) + if err != nil { + t.Fatal("failed to stat test file", err) + } + if err := OwnerPermissionsMatchFile(symlinkedFile, int(uid), int(info.Mode())); err != nil { + t.Fatalf("expected no error but got %v", err) + } +} diff --git a/helper/random/string_generator.go b/helper/random/string_generator.go index 621930eb66f2..96dd69ec60e3 100644 --- a/helper/random/string_generator.go +++ b/helper/random/string_generator.go @@ -7,6 +7,7 @@ import ( "io" "math" "sort" + "sync" "time" "unicode" @@ -76,7 +77,8 @@ type StringGenerator struct { Rules serializableRules `mapstructure:"-" json:"rule"` // This is "rule" in JSON so it matches the HCL property type // CharsetRule to choose runes from. This is computed from the rules, not directly configurable - charset runes + charset runes + charsetLock sync.RWMutex } // Generate a random string from the charset and adhering to the provided rules. @@ -116,7 +118,10 @@ func (g *StringGenerator) generate(rng io.Reader) (str string, err error) { // If performance improvements need to be made, this can be changed to read a batch of // potential strings at once rather than one at a time. This will significantly // improve performance, but at the cost of added complexity.
- candidate, err := randomRunes(rng, g.charset, g.Length) + g.charsetLock.RLock() + charset := g.charset + g.charsetLock.RUnlock() + candidate, err := randomRunes(rng, charset, g.Length) if err != nil { return "", fmt.Errorf("unable to generate random characters: %w", err) } @@ -229,6 +234,8 @@ func (g *StringGenerator) validateConfig() (err error) { merr = multierror.Append(merr, fmt.Errorf("specified rules require at least %d characters but %d is specified", minLen, g.Length)) } + g.charsetLock.Lock() + defer g.charsetLock.Unlock() // Ensure we have a charset & all characters are printable if len(g.charset) == 0 { // Yes this is mutating the generator but this is done so we don't have to compute this on every generation diff --git a/helper/testhelpers/ldap/ldaphelper.go b/helper/testhelpers/ldap/ldaphelper.go index b248c0294fda..79587ec6d9d1 100644 --- a/helper/testhelpers/ldap/ldaphelper.go +++ b/helper/testhelpers/ldap/ldaphelper.go @@ -33,6 +33,7 @@ func PrepareTestContainer(t *testing.T, version string) (cleanup func(), cfg *ld cfg.GroupDN = "ou=people,dc=planetexpress,dc=com" cfg.GroupAttr = "cn" cfg.RequestTimeout = 60 + cfg.MaximumPageSize = 1000 svc, err := runner.StartService(context.Background(), func(ctx context.Context, host string, port int) (docker.ServiceConfig, error) { connURL := fmt.Sprintf("ldap://%s:%d", host, port) diff --git a/helper/testhelpers/testhelpers.go b/helper/testhelpers/testhelpers.go index a9daabdda43e..5cb4fc3fe8fb 100644 --- a/helper/testhelpers/testhelpers.go +++ b/helper/testhelpers/testhelpers.go @@ -53,6 +53,9 @@ func GenerateRootWithError(t testing.T, cluster *vault.TestCluster, kind Generat keys = cluster.BarrierKeys } client := cluster.Cores[0].Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() var err error var status *api.GenerateRootStatusResponse @@ -174,6 +177,10 @@ func AttemptUnsealCore(c *vault.TestCluster, core *vault.TestClusterCore) error } client := core.Client + oldNS := client.Namespace() + defer client.SetNamespace(oldNS) + client.ClearNamespace() + client.Sys().ResetUnsealProcess() for j := 0; j < len(c.BarrierKeys); j++ { statusResp, err := client.Sys().Unseal(base64.StdEncoding.EncodeToString(c.BarrierKeys[j])) @@ -242,7 +249,10 @@ func DeriveActiveCore(t testing.T, cluster *vault.TestCluster) *vault.TestCluste t.Helper() for i := 0; i < 60; i++ { for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } @@ -260,7 +270,10 @@ func DeriveStandbyCores(t testing.T, cluster *vault.TestCluster) []*vault.TestCl t.Helper() cores := make([]*vault.TestClusterCore, 0, 2) for _, core := range cluster.Cores { + oldNS := core.Client.Namespace() + core.Client.ClearNamespace() leaderResp, err := core.Client.Sys().Leader() + core.Client.SetNamespace(oldNS) if err != nil { t.Fatal(err) } diff --git a/http/events.go b/http/events.go index aba3bb45cec8..e5eaa78be972 100644 --- a/http/events.go +++ b/http/events.go @@ -2,6 +2,7 @@ package http import ( "context" + "errors" "fmt" "net/http" "strconv" @@ -14,18 +15,17 @@ import ( "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" "github.com/hashicorp/vault/vault/eventbus" - "google.golang.org/protobuf/encoding/protojson" "nhooyr.io/websocket" ) type eventSubscribeArgs struct { - ctx context.Context - logger hclog.Logger - events *eventbus.EventBus - ns 
*namespace.Namespace - eventType logical.EventType - conn *websocket.Conn - json bool + ctx context.Context + logger hclog.Logger + events *eventbus.EventBus + ns *namespace.Namespace + pattern string + conn *websocket.Conn + json bool } // handleEventsSubscribeWebsocket runs forever, returning a websocket error code and reason @@ -33,7 +33,7 @@ type eventSubscribeArgs struct { func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCode, string, error) { ctx := args.ctx logger := args.logger - ch, cancel, err := args.events.Subscribe(ctx, args.ns, args.eventType) + ch, cancel, err := args.events.Subscribe(ctx, args.ns, args.pattern) if err != nil { logger.Info("Error subscribing", "error", err) return websocket.StatusUnsupportedData, "Error subscribing", nil @@ -46,19 +46,26 @@ func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCo logger.Info("Websocket context is done, closing the connection") return websocket.StatusNormalClosure, "", nil case message := <-ch: - logger.Debug("Sending message to websocket", "message", message) + logger.Debug("Sending message to websocket", "message", message.Payload) var messageBytes []byte + var messageType websocket.MessageType if args.json { - messageBytes, err = protojson.Marshal(message) + var ok bool + messageBytes, ok = message.Format("cloudevents-json") + if !ok { + logger.Warn("Could not get cloudevents JSON format") + return 0, "", errors.New("could not get cloudevents JSON format") + } + messageType = websocket.MessageText } else { - messageBytes, err = proto.Marshal(message) + messageBytes, err = proto.Marshal(message.Payload.(*logical.EventReceived)) + messageType = websocket.MessageBinary } if err != nil { logger.Warn("Could not serialize websocket event", "error", err) return 0, "", err } - messageString := string(messageBytes) + "\n" - err = args.conn.Write(ctx, websocket.MessageText, []byte(messageString)) + err = args.conn.Write(ctx, messageType, messageBytes) if err != nil { return 0, "", err } @@ -66,13 +73,25 @@ func handleEventsSubscribeWebsocket(args eventSubscribeArgs) (websocket.StatusCo } } -func handleEventsSubscribe(core *vault.Core) http.Handler { +func handleEventsSubscribe(core *vault.Core, req *logical.Request) http.Handler { return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { logger := core.Logger().Named("events-subscribe") - logger.Debug("Got request to", "url", r.URL, "version", r.Proto) ctx := r.Context() + + // ACL check + _, _, err := core.CheckToken(ctx, req, false) + if err != nil { + if errors.Is(err, logical.ErrPermissionDenied) { + respondError(w, http.StatusForbidden, logical.ErrPermissionDenied) + return + } + logger.Debug("Error validating token", "error", err) + respondError(w, http.StatusInternalServerError, fmt.Errorf("error validating token")) + return + } + ns, err := namespace.FromContext(ctx) if err != nil { logger.Info("Could not find namespace", "error", err) @@ -81,15 +100,14 @@ func handleEventsSubscribe(core *vault.Core) http.Handler { } prefix := "/v1/sys/events/subscribe/" - if ns.ID != "root" { - prefix = fmt.Sprintf("/v1/%s/sys/events/subscribe/", ns.Path) + if ns.ID != namespace.RootNamespaceID { + prefix = fmt.Sprintf("/v1/%ssys/events/subscribe/", ns.Path) } - eventTypeStr := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, prefix)) - if eventTypeStr == "" { + pattern := strings.TrimSpace(strings.TrimPrefix(r.URL.Path, prefix)) + if pattern == "" { respondError(w, http.StatusBadRequest, fmt.Errorf("did not specify eventType 
to subscribe to")) return } - eventType := logical.EventType(eventTypeStr) json := false jsonRaw := r.URL.Query().Get("json") @@ -122,7 +140,7 @@ func handleEventsSubscribe(core *vault.Core) http.Handler { } }() - closeStatus, closeReason, err := handleEventsSubscribeWebsocket(eventSubscribeArgs{ctx, logger, core.Events(), ns, eventType, conn, json}) + closeStatus, closeReason, err := handleEventsSubscribeWebsocket(eventSubscribeArgs{ctx, logger, core.Events(), ns, pattern, conn, json}) if err != nil { closeStatus = websocket.CloseStatus(err) if closeStatus == -1 { diff --git a/http/events_test.go b/http/events_test.go index d3729781df4c..d3debcde6bab 100644 --- a/http/events_test.go +++ b/http/events_test.go @@ -2,12 +2,16 @@ package http import ( "context" + "encoding/json" + "fmt" + "net/http" "strings" "sync/atomic" "testing" "time" "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/api" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/logical" "github.com/hashicorp/vault/vault" @@ -21,9 +25,18 @@ func TestEventsSubscribe(t *testing.T) { ln, addr := TestServer(t, core) defer ln.Close() + // unseal the core + keys, token := vault.TestCoreInit(t, core) + for _, key := range keys { + _, err := core.Unseal(key) + if err != nil { + t.Fatal(err) + } + } + stop := atomic.Bool{} - eventType := "abc" + const eventType = "abc" // send some events go func() { @@ -32,7 +45,10 @@ func TestEventsSubscribe(t *testing.T) { if err != nil { core.Logger().Info("Error generating UUID, exiting sender", "error", err) } - err = core.Events().SendInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, nil, logical.EventType(eventType), &logical.EventData{ + pluginInfo := &logical.EventPluginInfo{ + MountPath: "secret", + } + err = core.Events().SendInternal(namespace.RootContext(context.Background()), namespace.RootNamespace, pluginInfo, logical.EventType(eventType), &logical.EventData{ Id: id, Metadata: nil, EntityIds: nil, @@ -49,21 +65,132 @@ func TestEventsSubscribe(t *testing.T) { stop.Store(true) }) - ctx, cancelFunc := context.WithTimeout(context.Background(), 5*time.Second) - t.Cleanup(cancelFunc) + ctx := context.Background() + wsAddr := strings.Replace(addr, "http", "ws", 1) + + testCases := []struct { + json bool + }{{true}, {false}} + + for _, testCase := range testCases { + url := fmt.Sprintf("%s/v1/sys/events/subscribe/%s?json=%v", wsAddr, eventType, testCase.json) + conn, _, err := websocket.Dial(ctx, url, &websocket.DialOptions{ + HTTPHeader: http.Header{"x-vault-token": []string{token}}, + }) + if err != nil { + t.Fatal(err) + } + t.Cleanup(func() { + conn.Close(websocket.StatusNormalClosure, "") + }) + + _, msg, err := conn.Read(ctx) + if err != nil { + t.Fatal(err) + } + if testCase.json { + event := map[string]interface{}{} + err = json.Unmarshal(msg, &event) + if err != nil { + t.Fatal(err) + } + t.Log(string(msg)) + data := event["data"].(map[string]interface{}) + if actualType := data["event_type"].(string); actualType != eventType { + t.Fatalf("Expeced event type %s, got %s", eventType, actualType) + } + pluginInfo, ok := data["plugin_info"].(map[string]interface{}) + if !ok || pluginInfo == nil { + t.Fatalf("No plugin_info object: %v", data) + } + mountPath, ok := pluginInfo["mount_path"].(string) + if !ok || mountPath != "secret" { + t.Fatalf("Wrong mount_path: %v", data) + } + innerEvent := data["event"].(map[string]interface{}) + if innerEvent["id"].(string) != event["id"].(string) { + t.Fatalf("IDs don't 
match, expected %s, got %s", innerEvent["id"].(string), event["id"].(string)) + } + if innerEvent["note"].(string) != "testing" { + t.Fatalf("Expected 'testing', got %s", innerEvent["note"].(string)) + } + + checkRequiredCloudEventsFields(t, event) + } + } +} + +func checkRequiredCloudEventsFields(t *testing.T, event map[string]interface{}) { + t.Helper() + for _, attr := range []string{"id", "source", "specversion", "type"} { + if v, ok := event[attr]; !ok { + t.Errorf("Missing attribute %s", attr) + } else if str, ok := v.(string); !ok { + t.Errorf("Expected %s to be string but got %T", attr, v) + } else if str == "" { + t.Errorf("%s was empty string", attr) + } + } +} + +// TestEventsSubscribeAuth tests that unauthenticated and unauthorized subscriptions +// fail correctly. +func TestEventsSubscribeAuth(t *testing.T) { + core := vault.TestCore(t) + ln, addr := TestServer(t, core) + defer ln.Close() + + // unseal the core + keys, root := vault.TestCoreInit(t, core) + for _, key := range keys { + _, err := core.Unseal(key) + if err != nil { + t.Fatal(err) + } + } + var nonPrivilegedToken string + // Fetch a valid non privileged token. + { + config := api.DefaultConfig() + config.Address = addr + + client, err := api.NewClient(config) + if err != nil { + t.Fatal(err) + } + client.SetToken(root) + + secret, err := client.Auth().Token().Create(&api.TokenCreateRequest{Policies: []string{"default"}}) + if err != nil { + t.Fatal(err) + } + if secret.Auth.ClientToken == "" { + t.Fatal("Failed to fetch a non privileged token") + } + nonPrivilegedToken = secret.Auth.ClientToken + } + + ctx := context.Background() wsAddr := strings.Replace(addr, "http", "ws", 1) - conn, _, err := websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/"+eventType+"?json=true", nil) - if err != nil { - t.Fatal(err) + + // Get a 403 with no token. + _, resp, err := websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", nil) + if err == nil { + t.Error("Expected websocket error but got none") + } + if resp == nil || resp.StatusCode != http.StatusForbidden { + t.Errorf("Expected 403 but got %+v", resp) } - _, msg, err := conn.Read(ctx) - if err != nil { - t.Fatal(err) + // Get a 403 with a non privileged token. 
+ _, resp, err = websocket.Dial(ctx, wsAddr+"/v1/sys/events/subscribe/abc", &websocket.DialOptions{ + HTTPHeader: http.Header{"x-vault-token": []string{nonPrivilegedToken}}, + }) + if err == nil { + t.Error("Expected websocket error but got none") } - msgJson := strings.TrimSpace(string(msg)) - if !strings.HasPrefix(msgJson, "{") || !strings.HasSuffix(msgJson, "}") { - t.Errorf("Expected to get JSON event but got: %v", msgJson) + if resp == nil || resp.StatusCode != http.StatusForbidden { + t.Errorf("Expected 403 but got %+v", resp) } } diff --git a/http/handler.go b/http/handler.go index 601aa803779a..34c4b12cf19c 100644 --- a/http/handler.go +++ b/http/handler.go @@ -26,7 +26,6 @@ import ( "github.com/hashicorp/go-secure-stdlib/parseutil" "github.com/hashicorp/go-sockaddr" "github.com/hashicorp/go-uuid" - "github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/sdk/helper/consts" @@ -177,10 +176,6 @@ func handler(props *vault.HandlerProperties) http.Handler { mux.Handle("/v1/sys/storage/raft/join", handleSysRaftJoin(core)) mux.Handle("/v1/sys/internal/ui/feature-flags", handleSysInternalFeatureFlags(core)) - if core.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) { - mux.Handle("/v1/sys/events/subscribe/", handleEventsSubscribe(core)) - } - for _, path := range injectDataIntoTopRoutes { mux.Handle(path, handleRequestForwarding(core, handleLogicalWithInjector(core))) } diff --git a/http/logical.go b/http/logical.go index 6cdf6bb07110..6b12a26f3bfe 100644 --- a/http/logical.go +++ b/http/logical.go @@ -13,7 +13,8 @@ import ( "strings" "time" - uuid "github.com/hashicorp/go-uuid" + "github.com/hashicorp/go-uuid" + "github.com/hashicorp/vault/helper/experiments" "github.com/hashicorp/vault/helper/namespace" "github.com/hashicorp/vault/sdk/helper/consts" "github.com/hashicorp/vault/sdk/logical" @@ -346,6 +347,24 @@ func handleLogicalInternal(core *vault.Core, injectDataIntoTopLevel bool, noForw return } + // Websockets need to be handled at HTTP layer instead of logical requests. + if core.IsExperimentEnabled(experiments.VaultExperimentEventsAlpha1) { + ns, err := namespace.FromContext(r.Context()) + if err != nil { + respondError(w, http.StatusInternalServerError, err) + return + } + nsPath := ns.Path + if ns.ID == namespace.RootNamespaceID { + nsPath = "" + } + if strings.HasPrefix(r.URL.Path, fmt.Sprintf("/v1/%ssys/events/subscribe/", nsPath)) { + handler := handleEventsSubscribe(core, req) + handler.ServeHTTP(w, r) + return + } + } + // Make the internal request. We attach the connection info // as well in case this is an authentication request that requires // it. Vault core handles stripping this if we need to. 
This also diff --git a/http/sys_config_state_test.go b/http/sys_config_state_test.go index 08361af49339..5b5d9aac0dfa 100644 --- a/http/sys_config_state_test.go +++ b/http/sys_config_state_test.go @@ -6,70 +6,193 @@ import ( "testing" "github.com/go-test/deep" + "github.com/hashicorp/vault/command/server" + "github.com/hashicorp/vault/internalshared/configutil" "github.com/hashicorp/vault/vault" ) func TestSysConfigState_Sanitized(t *testing.T) { - var resp *http.Response + cases := []struct { + name string + storageConfig *server.Storage + haStorageConfig *server.Storage + expectedStorageOutput map[string]interface{} + expectedHAStorageOutput map[string]interface{} + }{ + { + name: "raft storage", + storageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, no HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: nil, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: nil, + }, + { + name: "inmem storage, raft HA storage", + storageConfig: &server.Storage{ + Type: "inmem", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + }, + haStorageConfig: &server.Storage{ + Type: "raft", + RedirectAddr: "http://127.0.0.1:8200", + ClusterAddr: "http://127.0.0.1:8201", + DisableClustering: false, + Config: map[string]string{ + "path": "/storage/path/raft", + "node_id": "raft1", + "max_entry_size": "2097152", + }, + }, + expectedStorageOutput: map[string]interface{}{ + "type": "inmem", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + }, + expectedHAStorageOutput: map[string]interface{}{ + "type": "raft", + "redirect_addr": "http://127.0.0.1:8200", + "cluster_addr": "http://127.0.0.1:8201", + "disable_clustering": false, + "raft": map[string]interface{}{ + "max_entry_size": "2097152", + }, + }, + }, + } - core, _, token := vault.TestCoreUnsealed(t) - ln, addr := TestServer(t, core) - defer ln.Close() - TestServerAuth(t, addr, token) + for _, tc := range cases { + tc := tc - resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") - testResponseStatus(t, resp, 200) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - var actual map[string]interface{} - var expected map[string]interface{} + var resp *http.Response + confRaw := &server.Config{ + Storage: tc.storageConfig, + HAStorage: tc.haStorageConfig, + SharedConfig: &configutil.SharedConfig{ + Listeners: []*configutil.Listener{ + { + Type: "tcp", + Address: "127.0.0.1", + }, + }, + }, + } - configResp := map[string]interface{}{ - "api_addr": "", - "cache_size": json.Number("0"), - "cluster_addr": "", - "cluster_cipher_suites": "", - 
"cluster_name": "", - "default_lease_ttl": json.Number("0"), - "default_max_request_duration": json.Number("0"), - "disable_cache": false, - "disable_clustering": false, - "disable_indexing": false, - "disable_mlock": false, - "disable_performance_standby": false, - "disable_printable_check": false, - "disable_sealwrap": false, - "experiments": nil, - "raw_storage_endpoint": false, - "detect_deadlocks": "", - "introspection_endpoint": false, - "disable_sentinel_trace": false, - "enable_ui": false, - "log_format": "", - "log_level": "", - "max_lease_ttl": json.Number("0"), - "pid_file": "", - "plugin_directory": "", - "plugin_file_uid": json.Number("0"), - "plugin_file_permissions": json.Number("0"), - "enable_response_header_hostname": false, - "enable_response_header_raft_node_id": false, - "log_requests_level": "", - } + conf := &vault.CoreConfig{ + RawConfig: confRaw, + } - expected = map[string]interface{}{ - "lease_id": "", - "renewable": false, - "lease_duration": json.Number("0"), - "wrap_info": nil, - "warnings": nil, - "auth": nil, - "data": configResp, - } + core, _, token := vault.TestCoreUnsealedWithConfig(t, conf) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp = testHttpGet(t, token, addr+"/v1/sys/config/state/sanitized") + testResponseStatus(t, resp, 200) + + var actual map[string]interface{} + var expected map[string]interface{} + + configResp := map[string]interface{}{ + "api_addr": "", + "cache_size": json.Number("0"), + "cluster_addr": "", + "cluster_cipher_suites": "", + "cluster_name": "", + "default_lease_ttl": json.Number("0"), + "default_max_request_duration": json.Number("0"), + "disable_cache": false, + "disable_clustering": false, + "disable_indexing": false, + "disable_mlock": false, + "disable_performance_standby": false, + "disable_printable_check": false, + "disable_sealwrap": false, + "experiments": nil, + "raw_storage_endpoint": false, + "detect_deadlocks": "", + "introspection_endpoint": false, + "disable_sentinel_trace": false, + "enable_ui": false, + "log_format": "", + "log_level": "", + "max_lease_ttl": json.Number("0"), + "pid_file": "", + "plugin_directory": "", + "plugin_file_uid": json.Number("0"), + "plugin_file_permissions": json.Number("0"), + "enable_response_header_hostname": false, + "enable_response_header_raft_node_id": false, + "log_requests_level": "", + "listeners": []interface{}{ + map[string]interface{}{ + "config": nil, + "type": "tcp", + }, + }, + "storage": tc.expectedStorageOutput, + } + + if tc.expectedHAStorageOutput != nil { + configResp["ha_storage"] = tc.expectedHAStorageOutput + } + + expected = map[string]interface{}{ + "lease_id": "", + "renewable": false, + "lease_duration": json.Number("0"), + "wrap_info": nil, + "warnings": nil, + "auth": nil, + "data": configResp, + } - testResponseBody(t, resp, &actual) - expected["request_id"] = actual["request_id"] + testResponseBody(t, resp, &actual) + expected["request_id"] = actual["request_id"] - if diff := deep.Equal(actual, expected); len(diff) > 0 { - t.Fatalf("bad mismatch response body: diff: %v", diff) + if diff := deep.Equal(actual, expected); len(diff) > 0 { + t.Fatalf("bad mismatch response body: diff: %v", diff) + } + }) } } diff --git a/http/sys_mount_test.go b/http/sys_mount_test.go index ae33a258164b..5675cce5e305 100644 --- a/http/sys_mount_test.go +++ b/http/sys_mount_test.go @@ -416,6 +416,72 @@ func TestSysMount_put(t *testing.T) { // for more info. 
} +// TestSysRemountSpacesFrom ensures we succeed in a remount where the 'from' mount has spaces in the name +func TestSysRemountSpacesFrom(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": "baz", + }) + testResponseStatus(t, resp, 200) +} + +// TestSysRemountSpacesTo ensures we succeed in a remount where the 'to' mount has spaces in the name +func TestSysRemountSpacesTo(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": "bar baz", + }) + testResponseStatus(t, resp, 200) +} + +// TestSysRemountTrailingSpaces ensures we fail on trailing spaces +func TestSysRemountTrailingSpaces(t *testing.T) { + core, _, token := vault.TestCoreUnsealed(t) + ln, addr := TestServer(t, core) + defer ln.Close() + TestServerAuth(t, addr, token) + + resp := testHttpPost(t, token, addr+"/v1/sys/mounts/foo%20bar", map[string]interface{}{ + "type": "kv", + "description": "foo", + }) + testResponseStatus(t, resp, 204) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": "foo bar", + "to": " baz ", + }) + testResponseStatus(t, resp, 400) + + resp = testHttpPost(t, token, addr+"/v1/sys/remount", map[string]interface{}{ + "from": " foo bar ", + "to": "baz", + }) + testResponseStatus(t, resp, 400) +} + func TestSysRemount(t *testing.T) { core, _, token := vault.TestCoreUnsealed(t) ln, addr := TestServer(t, core) diff --git a/physical/mssql/mssql.go b/physical/mssql/mssql.go index 65c85ae3e454..045b40638731 100644 --- a/physical/mssql/mssql.go +++ b/physical/mssql/mssql.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "regexp" "sort" "strconv" "strings" @@ -18,6 +19,7 @@ import ( // Verify MSSQLBackend satisfies the correct interfaces var _ physical.Backend = (*MSSQLBackend)(nil) +var identifierRegex = regexp.MustCompile(`^[\p{L}_][\p{L}\p{Nd}@#$_]*$`) type MSSQLBackend struct { dbTable string @@ -27,6 +29,13 @@ type MSSQLBackend struct { permitPool *physical.PermitPool } +func isInvalidIdentifier(name string) bool { + if !identifierRegex.MatchString(name) { + return true + } + return false +} + func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backend, error) { username, ok := conf["username"] if !ok { @@ -68,11 +77,19 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen database = "Vault" } + if isInvalidIdentifier(database) { + return nil, fmt.Errorf("invalid database name") + } + table, ok := conf["table"] if !ok { table = "Vault" } + if isInvalidIdentifier(table) { + return nil, fmt.Errorf("invalid table name") + } + appname, ok := conf["appname"] if !ok { appname = "Vault" @@ -93,6 +110,10 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen schema = "dbo" } + if isInvalidIdentifier(schema) { + return nil, fmt.Errorf("invalid schema 
name") + } + connectionString := fmt.Sprintf("server=%s;app name=%s;connection timeout=%s;log=%s", server, appname, connectionTimeout, logLevel) if username != "" { connectionString += ";user id=" + username @@ -113,18 +134,17 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen db.SetMaxOpenConns(maxParInt) - if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = '" + database + "') CREATE DATABASE " + database); err != nil { + if _, err := db.Exec("IF NOT EXISTS(SELECT * FROM sys.databases WHERE name = ?) CREATE DATABASE "+database, database); err != nil { return nil, fmt.Errorf("failed to create mssql database: %w", err) } dbTable := database + "." + schema + "." + table - createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME='" + table + "' AND TABLE_SCHEMA='" + schema + - "') CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))" + createQuery := "IF NOT EXISTS(SELECT 1 FROM " + database + ".INFORMATION_SCHEMA.TABLES WHERE TABLE_TYPE='BASE TABLE' AND TABLE_NAME=? AND TABLE_SCHEMA=?) CREATE TABLE " + dbTable + " (Path VARCHAR(512) PRIMARY KEY, Value VARBINARY(MAX))" if schema != "dbo" { var num int - err = db.QueryRow("SELECT 1 FROM " + database + ".sys.schemas WHERE name = '" + schema + "'").Scan(&num) + err = db.QueryRow("SELECT 1 FROM "+database+".sys.schemas WHERE name = ?", schema).Scan(&num) switch { case err == sql.ErrNoRows: @@ -137,7 +157,7 @@ func NewMSSQLBackend(conf map[string]string, logger log.Logger) (physical.Backen } } - if _, err := db.Exec(createQuery); err != nil { + if _, err := db.Exec(createQuery, table, schema); err != nil { return nil, fmt.Errorf("failed to create mssql table: %w", err) } diff --git a/physical/mssql/mssql_test.go b/physical/mssql/mssql_test.go index f447b0355eb0..e026ff6fa2fd 100644 --- a/physical/mssql/mssql_test.go +++ b/physical/mssql/mssql_test.go @@ -4,13 +4,53 @@ import ( "os" "testing" + _ "github.com/denisenkom/go-mssqldb" log "github.com/hashicorp/go-hclog" "github.com/hashicorp/vault/sdk/helper/logging" "github.com/hashicorp/vault/sdk/physical" - - _ "github.com/denisenkom/go-mssqldb" ) +// TestInvalidIdentifier checks validity of an identifier +func TestInvalidIdentifier(t *testing.T) { + testcases := map[string]bool{ + "name": true, + "_name": true, + "Name": true, + "#name": false, + "?Name": false, + "9name": false, + "@name": false, + "$name": false, + " name": false, + "n ame": false, + "n4444444": true, + "_4321098765": true, + "_##$$@@__": true, + "_123name#@": true, + "name!": false, + "name%": false, + "name^": false, + "name&": false, + "name*": false, + "name(": false, + "name)": false, + "nåame": true, + "åname": true, + "name'": false, + "nam`e": false, + "пример": true, + "_#Āā@#$_ĂĄąćĈĉĊċ": true, + "ÛÜÝÞßàáâ": true, + "豈更滑a23$#@": true, + } + + for i, expected := range testcases { + if !isInvalidIdentifier(i) != expected { + t.Fatalf("unexpected identifier %s: expected validity %v", i, expected) + } + } +} + func TestMSSQLBackend(t *testing.T) { server := os.Getenv("MSSQL_SERVER") if server == "" { diff --git a/scripts/ci-helper.sh b/scripts/ci-helper.sh index 585f89786cf7..ba855fc20bc6 100755 --- a/scripts/ci-helper.sh +++ b/scripts/ci-helper.sh @@ -156,9 +156,10 @@ function build() { # Build our ldflags msg="--> Building Vault v$version, revision $revision, built $build_date" - # Strip the symbol and dwarf information by default + # Keep the 
symbol and dwarf information by default + # TODO: maybe add REMOVE_SYMBOLS? if [ -n "$KEEP_SYMBOLS" ]; then ldflags="" else - ldflags="-s -w " + ldflags="" fi diff --git a/sdk/framework/openapi.go b/sdk/framework/openapi.go index 57906672827a..d69e0b83e69d 100644 --- a/sdk/framework/openapi.go +++ b/sdk/framework/openapi.go @@ -547,14 +547,55 @@ func constructRequestResponseName(path, prefix, suffix string) string { return b.String() } +// specialPathMatch checks whether the given path matches one of the special +// paths, taking into account * and + wildcards (e.g. foo/+/bar/*) func specialPathMatch(path string, specialPaths []string) bool { - // Test for exact or prefix match of special paths. + // pathMatchesByParts determines if the path matches the special path's + // pattern, accounting for the '+' and '*' wildcards + pathMatchesByParts := func(pathParts []string, specialPathParts []string) bool { + if len(pathParts) < len(specialPathParts) { + return false + } + for i := 0; i < len(specialPathParts); i++ { + var ( + part = pathParts[i] + pattern = specialPathParts[i] + ) + if pattern == "+" { + continue + } + if pattern == "*" { + return true + } + if strings.HasSuffix(pattern, "*") && strings.HasPrefix(part, pattern[0:len(pattern)-1]) { + return true + } + if pattern != part { + return false + } + } + return len(pathParts) == len(specialPathParts) + } + + pathParts := strings.Split(path, "/") + for _, sp := range specialPaths { - if sp == path || - (strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1])) { + // exact match + if sp == path { + return true + } + + // match * + if strings.HasSuffix(sp, "*") && strings.HasPrefix(path, sp[0:len(sp)-1]) { + return true + } + + // match + + if strings.Contains(sp, "+") && pathMatchesByParts(pathParts, strings.Split(sp, "/")) { return true } } + return false } diff --git a/sdk/framework/openapi_test.go b/sdk/framework/openapi_test.go index 3d5789e9dca9..50f9bba17028 100644 --- a/sdk/framework/openapi_test.go +++ b/sdk/framework/openapi_test.go @@ -223,42 +223,123 @@ func TestOpenAPI_SplitFields(t *testing.T) { } func TestOpenAPI_SpecialPaths(t *testing.T) { - tests := []struct { - pattern string - rootPaths []string - root bool - unauthPaths []string - unauth bool + tests := map[string]struct { + pattern string + rootPaths []string + rootExpected bool + unauthenticatedPaths []string + unauthenticatedExpected bool }{ - {"foo", []string{}, false, []string{"foo"}, true}, - {"foo", []string{"foo"}, true, []string{"bar"}, false}, - {"foo/bar", []string{"foo"}, false, []string{"foo/*"}, true}, - {"foo/bar", []string{"foo/*"}, true, []string{"foo"}, false}, - {"foo/", []string{"foo/*"}, true, []string{"a", "b", "foo/"}, true}, - {"foo", []string{"foo*"}, true, []string{"a", "fo*"}, true}, - {"foo/bar", []string{"a", "b", "foo/*"}, true, []string{"foo/baz/*"}, false}, + "empty": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{}, + unauthenticatedExpected: false, + }, + "exact-match-unauthenticated": { + pattern: "foo", + rootPaths: []string{}, + rootExpected: false, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: true, + }, + "exact-match-root": { + pattern: "foo", + rootPaths: []string{"foo"}, + rootExpected: true, + unauthenticatedPaths: []string{"bar"}, + unauthenticatedExpected: false, + }, + "asterisk-match-unauthenticated": { + pattern: "foo/bar", + rootPaths: []string{"foo"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/*"}, + 
unauthenticatedExpected: true, + }, + "asterisk-match-root": { + pattern: "foo/bar", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo"}, + unauthenticatedExpected: false, + }, + "path-ends-with-slash": { + pattern: "foo/", + rootPaths: []string{"foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "b", "foo*"}, + unauthenticatedExpected: true, + }, + "asterisk-match-no-slash": { + pattern: "foo", + rootPaths: []string{"foo*"}, + rootExpected: true, + unauthenticatedPaths: []string{"a", "fo*"}, + unauthenticatedExpected: true, + }, + "multiple-root-paths": { + pattern: "foo/bar", + rootPaths: []string{"a", "b", "foo/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/baz/*"}, + unauthenticatedExpected: false, + }, + "plus-match-unauthenticated": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/bar"}, + rootExpected: false, + unauthenticatedPaths: []string{"foo/+/baz"}, + unauthenticatedExpected: true, + }, + "plus-match-root": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/baz"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, + "plus-and-asterisk": { + pattern: "foo/bar/baz/something", + rootPaths: []string{"foo/+/baz/*"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/+/baz*"}, + unauthenticatedExpected: true, + }, + "double-plus-good": { + pattern: "foo/bar/baz", + rootPaths: []string{"foo/+/+"}, + rootExpected: true, + unauthenticatedPaths: []string{"foo/bar"}, + unauthenticatedExpected: false, + }, } - for i, test := range tests { - doc := NewOASDocument("version") - path := Path{ - Pattern: test.pattern, - } - sp := &logical.Paths{ - Root: test.rootPaths, - Unauthenticated: test.unauthPaths, - } - err := documentPath(&path, sp, "kv", logical.TypeLogical, doc) - if err != nil { - t.Fatal(err) - } - result := test.root - if doc.Paths["/"+test.pattern].Sudo != result { - t.Fatalf("Test (root) %d: Expected %v got %v", i, test.root, result) - } - result = test.unauth - if doc.Paths["/"+test.pattern].Unauthenticated != result { - t.Fatalf("Test (unauth) %d: Expected %v got %v", i, test.unauth, result) - } + for name, test := range tests { + t.Run(name, func(t *testing.T) { + doc := NewOASDocument("version") + path := Path{ + Pattern: test.pattern, + } + specialPaths := &logical.Paths{ + Root: test.rootPaths, + Unauthenticated: test.unauthenticatedPaths, + } + + if err := documentPath(&path, specialPaths, "kv", logical.TypeLogical, doc); err != nil { + t.Fatal(err) + } + + actual := doc.Paths["/"+test.pattern].Sudo + if actual != test.rootExpected { + t.Fatalf("Test (root): expected: %v; got: %v", test.rootExpected, actual) + } + + actual = doc.Paths["/"+test.pattern].Unauthenticated + if actual != test.unauthenticatedExpected { + t.Fatalf("Test (unauth): expected: %v; got: %v", test.unauthenticatedExpected, actual) + } + }) } } diff --git a/sdk/go.mod b/sdk/go.mod index 5097dc4bf771..a51b3f919be3 100644 --- a/sdk/go.mod +++ b/sdk/go.mod @@ -15,7 +15,7 @@ require ( github.com/hashicorp/go-hclog v0.16.2 github.com/hashicorp/go-immutable-radix v1.3.1 github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 - github.com/hashicorp/go-kms-wrapping/v2 v2.0.7 + github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 github.com/hashicorp/go-multierror v1.1.1 github.com/hashicorp/go-plugin v1.4.5 github.com/hashicorp/go-retryablehttp v0.5.3 @@ -37,8 +37,8 @@ require ( github.com/ryanuber/go-glob v1.0.0 
github.com/stretchr/testify v1.7.0 go.uber.org/atomic v1.9.0 - golang.org/x/crypto v0.5.0 - golang.org/x/text v0.6.0 + golang.org/x/crypto v0.6.0 + golang.org/x/text v0.7.0 google.golang.org/grpc v1.41.0 google.golang.org/protobuf v1.27.1 ) @@ -59,9 +59,9 @@ require ( github.com/pmezard/go-difflib v1.0.0 // indirect github.com/rogpeppe/go-internal v1.9.0 // indirect github.com/stretchr/objx v0.1.1 // indirect - golang.org/x/net v0.5.0 // indirect - golang.org/x/sys v0.4.0 // indirect - golang.org/x/term v0.4.0 // indirect + golang.org/x/net v0.7.0 // indirect + golang.org/x/sys v0.5.0 // indirect + golang.org/x/term v0.5.0 // indirect google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 // indirect gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c // indirect ) diff --git a/sdk/go.sum b/sdk/go.sum index 0dd2bfc05e8a..b33acfc9cfe3 100644 --- a/sdk/go.sum +++ b/sdk/go.sum @@ -96,8 +96,8 @@ github.com/hashicorp/go-immutable-radix v1.3.1 h1:DKHmCUm2hRBK510BaiZlwvpD40f8bJ github.com/hashicorp/go-immutable-radix v1.3.1/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0 h1:pSjQfW3vPtrOTcasTUKgCTQT7OGPPTTMVRrOfU6FJD8= github.com/hashicorp/go-kms-wrapping/entropy/v2 v2.0.0/go.mod h1:xvb32K2keAc+R8DSFG2IwDcydK9DBQE+fGA5fsw6hSk= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.7 h1:P+dh3M6k5aNl2wXrA9s6zquMHWPaYIkotCffiMIYt6U= -github.com/hashicorp/go-kms-wrapping/v2 v2.0.7/go.mod h1:sDQAfwJGv25uGPZA04x87ERglCG6avnRcBT9wYoMII8= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8 h1:9Q2lu1YbbmiAgvYZ7Pr31RdlVonUpX+mmDL7Z7qTA2U= +github.com/hashicorp/go-kms-wrapping/v2 v2.0.8/go.mod h1:qTCjxGig/kjuj3hk1z8pOUrzbse/GxB1tGfbrq8tGJg= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= github.com/hashicorp/go-multierror v1.1.1 h1:H5DkEtf6CXdFp0N0Em5UCwQpXMWke8IA0+lD48awMYo= github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= @@ -223,8 +223,8 @@ golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnf golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= +golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= +golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -239,8 +239,8 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.5.0 
h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= +golang.org/x/net v0.7.0 h1:rJrUqqhjsgNp7KqAIc25s9pZnjU7TUcSY7HcVZjdn1g= +golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -264,15 +264,15 @@ golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU= +golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.4.0 h1:O7UWfv5+A2qiuulQk30kVinPoMtoIPeVaKLEgLpVkvg= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= +golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY= +golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo= +golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= diff --git a/sdk/helper/ldaputil/client.go b/sdk/helper/ldaputil/client.go index 8a7ac4822c34..0633cfe21c5e 100644 --- a/sdk/helper/ldaputil/client.go +++ b/sdk/helper/ldaputil/client.go @@ -28,6 +28,7 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { var retErr *multierror.Error var conn Connection urls := strings.Split(cfg.Url, ",") + for _, uut := range urls { u, err := url.Parse(uut) if err != nil { @@ -40,12 +41,20 @@ func (c *Client) DialLDAP(cfg *ConfigEntry) (Connection, error) { } var tlsConfig *tls.Config + dialer := net.Dialer{ + Timeout: time.Duration(cfg.ConnectionTimeout) * time.Second, + } + switch u.Scheme { case "ldap": if port == "" { port = "389" } - conn, err = c.LDAP.Dial("tcp", net.JoinHostPort(host, port)) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + + conn, err = c.LDAP.DialURL(fullAddr, opt) if err != nil { break } @@ -68,7 +77,15 @@ func (c *Client) DialLDAP(cfg 
*ConfigEntry) (Connection, error) { if err != nil { break } - conn, err = c.LDAP.DialTLS("tcp", net.JoinHostPort(host, port), tlsConfig) + + fullAddr := fmt.Sprintf("%s://%s", u.Scheme, net.JoinHostPort(host, port)) + opt := ldap.DialWithDialer(&dialer) + tls := ldap.DialWithTLSConfig(tlsConfig) + + conn, err = c.LDAP.DialURL(fullAddr, opt, tls) + if err != nil { + break + } default: retErr = multierror.Append(retErr, fmt.Errorf("invalid LDAP scheme in url %q", net.JoinHostPort(host, port))) continue @@ -400,7 +417,7 @@ func (c *Client) performLdapFilterGroupsSearchPaging(cfg *ConfigEntry, conn Pagi cfg.GroupAttr, }, SizeLimit: math.MaxInt32, - }, math.MaxInt32) + }, uint32(cfg.MaximumPageSize)) if err != nil { return nil, fmt.Errorf("LDAP search failed: %w", err) } @@ -519,7 +536,7 @@ func (c *Client) GetLdapGroups(cfg *ConfigEntry, conn Connection, userDN string, if cfg.UseTokenGroups { entries, err = c.performLdapTokenGroupsSearch(cfg, conn, userDN) } else { - if paging, ok := conn.(PagingConnection); ok { + if paging, ok := conn.(PagingConnection); ok && cfg.MaximumPageSize >= 0 { entries, err = c.performLdapFilterGroupsSearchPaging(cfg, paging, userDN, username) } else { entries, err = c.performLdapFilterGroupsSearch(cfg, conn, userDN, username) diff --git a/sdk/helper/ldaputil/config.go b/sdk/helper/ldaputil/config.go index 43844da22b13..80d5c55ea6e7 100644 --- a/sdk/helper/ldaputil/config.go +++ b/sdk/helper/ldaputil/config.go @@ -6,6 +6,7 @@ import ( "encoding/pem" "errors" "fmt" + "math" "strings" "text/template" @@ -226,6 +227,18 @@ Default: ({{.UserAttr}}={{.Username}})`, Description: "Timeout, in seconds, for the connection when making requests against the server before returning back an error.", Default: "90s", }, + + "connection_timeout": { + Type: framework.TypeDurationSecond, + Description: "Timeout, in seconds, when attempting to connect to the LDAP server before trying the next URL in the configuration.", + Default: "30s", + }, + + "max_page_size": { + Type: framework.TypeInt, + Description: "The maximum number of results to return for a single paged query. If not set, the server default will be used for paged searches. A requested max_page_size of 0 is interpreted as no limit by LDAP servers. If set to a negative value, search requests will not be paged.", + Default: math.MaxInt32, + }, } } @@ -392,6 +405,14 @@ func NewConfigEntry(existing *ConfigEntry, d *framework.FieldData) (*ConfigEntry cfg.RequestTimeout = d.Get("request_timeout").(int) } + if _, ok := d.Raw["connection_timeout"]; ok || !hadExisting { + cfg.ConnectionTimeout = d.Get("connection_timeout").(int) + } + + if _, ok := d.Raw["max_page_size"]; ok || !hadExisting { + cfg.MaximumPageSize = d.Get("max_page_size").(int) + } + return cfg, nil } @@ -418,6 +439,8 @@ type ConfigEntry struct { UseTokenGroups bool `json:"use_token_groups"` UsePre111GroupCNBehavior *bool `json:"use_pre111_group_cn_behavior"` RequestTimeout int `json:"request_timeout"` + ConnectionTimeout int `json:"connection_timeout"` + MaximumPageSize int `json:"max_page_size"` // These json tags deviate from snake case because there was a past issue // where the tag was being ignored, causing it to be jsonified as "CaseSensitiveNames", etc. 
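Note: the DialLDAP change above replaces the deprecated ldap.Dial/ldap.DialTLS helpers with ldap.DialURL plus dial options, which is what lets the new connection_timeout bound the connect phase. As a rough standalone sketch of that go-ldap pattern (the server URL, timeout, and TLS settings here are illustrative placeholders, not values from this diff):

```
package main

import (
	"crypto/tls"
	"log"
	"net"
	"time"

	"github.com/go-ldap/ldap/v3"
)

func main() {
	// Bound only the TCP connect phase, as connection_timeout does above.
	dialer := &net.Dialer{Timeout: 30 * time.Second}

	// For ldaps:// URLs, DialURL negotiates TLS during the dial, so the TLS
	// settings are passed as a dial option rather than via a DialTLS call.
	conn, err := ldap.DialURL(
		"ldaps://ldap.example.com:636", // placeholder server
		ldap.DialWithDialer(dialer),
		ldap.DialWithTLSConfig(&tls.Config{MinVersion: tls.VersionTLS12}),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
}
```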
@@ -455,7 +478,9 @@ func (c *ConfigEntry) PasswordlessMap() map[string]interface{} { "use_token_groups": c.UseTokenGroups, "anonymous_group_search": c.AnonymousGroupSearch, "request_timeout": c.RequestTimeout, + "connection_timeout": c.ConnectionTimeout, "username_as_alias": c.UsernameAsAlias, + "max_page_size": c.MaximumPageSize, } if c.CaseSensitiveNames != nil { m["case_sensitive_names"] = *c.CaseSensitiveNames diff --git a/sdk/helper/ldaputil/config_test.go b/sdk/helper/ldaputil/config_test.go index 32edb5dffaad..3a169ef81687 100644 --- a/sdk/helper/ldaputil/config_test.go +++ b/sdk/helper/ldaputil/config_test.go @@ -71,15 +71,16 @@ func testConfig(t *testing.T) *ConfigEntry { t.Helper() return &ConfigEntry{ - Url: "ldap://138.91.247.105", - UserDN: "example,com", - BindDN: "kitty", - BindPassword: "cats", - TLSMaxVersion: "tls12", - TLSMinVersion: "tls12", - RequestTimeout: 30, - ClientTLSCert: "", - ClientTLSKey: "", + Url: "ldap://138.91.247.105", + UserDN: "example,com", + BindDN: "kitty", + BindPassword: "cats", + TLSMaxVersion: "tls12", + TLSMinVersion: "tls12", + RequestTimeout: 30, + ConnectionTimeout: 15, + ClientTLSCert: "", + ClientTLSKey: "", } } @@ -138,6 +139,7 @@ var jsonConfig = []byte(`{ "tls_max_version": "tls12", "tls_min_version": "tls12", "request_timeout": 30, + "connection_timeout": 15, "ClientTLSCert": "", "ClientTLSKey": "" }`) @@ -168,6 +170,8 @@ var jsonConfigDefault = []byte(` "use_pre111_group_cn_behavior": null, "username_as_alias": false, "request_timeout": 90, + "connection_timeout": 30, + "max_page_size": 2147483647, "CaseSensitiveNames": false, "ClientTLSCert": "", "ClientTLSKey": "" diff --git a/sdk/helper/ldaputil/ldap.go b/sdk/helper/ldaputil/ldap.go index 82ace01773cc..73e36b230dc0 100644 --- a/sdk/helper/ldaputil/ldap.go +++ b/sdk/helper/ldaputil/ldap.go @@ -1,8 +1,6 @@ package ldaputil import ( - "crypto/tls" - "github.com/go-ldap/ldap/v3" ) @@ -13,16 +11,11 @@ func NewLDAP() LDAP { // LDAP provides ldap functionality, but through an interface // rather than statically. This allows faking it for tests. type LDAP interface { - Dial(network, addr string) (Connection, error) - DialTLS(network, addr string, config *tls.Config) (Connection, error) + DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) } type ldapIfc struct{} -func (l *ldapIfc) Dial(network, addr string) (Connection, error) { - return ldap.Dial(network, addr) -} - -func (l *ldapIfc) DialTLS(network, addr string, config *tls.Config) (Connection, error) { - return ldap.DialTLS(network, addr, config) +func (l *ldapIfc) DialURL(addr string, opts ...ldap.DialOpt) (Connection, error) { + return ldap.DialURL(addr, opts...) 
} diff --git a/sdk/helper/ocsp/client.go b/sdk/helper/ocsp/client.go index e54fdeface46..c1b9c2fbc37a 100644 --- a/sdk/helper/ocsp/client.go +++ b/sdk/helper/ocsp/client.go @@ -24,6 +24,7 @@ import ( "time" "github.com/hashicorp/go-hclog" + "github.com/hashicorp/go-multierror" "github.com/hashicorp/go-retryablehttp" lru "github.com/hashicorp/golang-lru" "github.com/hashicorp/vault/sdk/helper/certutil" @@ -283,12 +284,8 @@ func (c *Client) retryOCSP( headers map[string]string, reqBody []byte, issuer *x509.Certificate, -) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, err error) { - origHost := *ocspHost +) (ocspRes *ocsp.Response, ocspResBytes []byte, ocspS *ocspStatus, retErr error) { doRequest := func(request *retryablehttp.Request) (*http.Response, error) { - if err != nil { - return nil, err - } if request != nil { request = request.WithContext(ctx) for k, v := range headers { @@ -303,43 +300,152 @@ func (c *Client) retryOCSP( return res, err } - ocspHost.Path = ocspHost.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) - var res *http.Response - request, err := req("GET", ocspHost.String(), nil) - if err != nil { - return nil, nil, nil, err - } - if res, err = doRequest(request); err != nil { - return nil, nil, nil, err - } else { - defer res.Body.Close() - } - if res.StatusCode == http.StatusMethodNotAllowed { - request, err := req("POST", origHost.String(), bytes.NewBuffer(reqBody)) + for _, method := range []string{"GET", "POST"} { + reqUrl := *ocspHost + var body []byte + + switch method { + case "GET": + reqUrl.Path = reqUrl.Path + "/" + base64.StdEncoding.EncodeToString(reqBody) + case "POST": + body = reqBody + default: + // Programming error; all request/systems errors are multierror + // and appended. + return nil, nil, nil, fmt.Errorf("unknown request method: %v", method) + } + + var res *http.Response + request, err := req(method, reqUrl.String(), bytes.NewBuffer(body)) if err != nil { - return nil, nil, nil, err + err = fmt.Errorf("error creating %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue } - if res, err := doRequest(request); err != nil { - return nil, nil, nil, err + if res, err = doRequest(request); err != nil { + err = fmt.Errorf("error doing %v request: %w", method, err) + retErr = multierror.Append(retErr, err) + continue } else { defer res.Body.Close() } + + if res.StatusCode != http.StatusOK { + err = fmt.Errorf("HTTP code is not OK on %v request. %v: %v", method, res.StatusCode, res.Status) + retErr = multierror.Append(retErr, err) + continue + } + + ocspResBytes, err = io.ReadAll(res.Body) + if err != nil { + err = fmt.Errorf("error reading %v request body: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Reading an OCSP response shouldn't be fatal. A misconfigured + // endpoint might return invalid results for e.g., GET but return + // valid results for POST on retry. This could happen if e.g., the + // server responds with JSON. + ocspRes, err = ocsp.ParseResponse(ocspResBytes /*issuer = */, nil /* !!unsafe!! */) + if err != nil { + err = fmt.Errorf("error parsing %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + + // Above, we use the unsafe issuer=nil parameter to ocsp.ParseResponse + // because Go's library does the wrong thing. 
+ // + // Here, we lack a full chain, but we know we trust the parent issuer, + // so if the Go library incorrectly discards useful certificates, we + // likely cannot verify this without passing through the full chain + // back to the root. + // + // Instead, take one of two paths: 1. if there is no certificate in + // the ocspRes, verify the OCSP response directly with our trusted + // issuer certificate, or 2. if there is a certificate, either verify + // it directly matches our trusted issuer certificate, or verify it + // is signed by our trusted issuer certificate. + // + // See also: https://github.com/golang/go/issues/59641 + // + // This addresses the !!unsafe!! behavior above. + if ocspRes.Certificate == nil { + if err := ocspRes.CheckSignatureFrom(issuer); err != nil { + err = fmt.Errorf("error directly verifying signature on %v OCSP response: %w", method, err) + retErr = multierror.Append(retErr, err) + continue + } + } else { + // Because we have at least one certificate here, we know that + // Go's ocsp library verified the signature from this certificate + // onto the response and it was valid. Now we need to know we trust + // this certificate. There are two ways we can do this: + // + // 1. Via confirming issuer == ocspRes.Certificate, or + // 2. Via confirming ocspRes.Certificate.CheckSignatureFrom(issuer). + if !bytes.Equal(issuer.Raw, ocspRes.Certificate.Raw) { + // 1 must not hold, so 2 holds; verify the signature. + if err := ocspRes.Certificate.CheckSignatureFrom(issuer); err != nil { + err = fmt.Errorf("error checking chain of trust on %v OCSP response via %v: %w", method, issuer.Subject.String(), err) + retErr = multierror.Append(retErr, err) + continue + } + + // Verify the OCSP responder certificate is still valid and + // contains the required EKU since it is a delegated OCSP + // responder certificate. + if ocspRes.Certificate.NotAfter.Before(time.Now()) { + err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate has expired", method) + retErr = multierror.Append(retErr, err) + continue + } + haveEKU := false + for _, ku := range ocspRes.Certificate.ExtKeyUsage { + if ku == x509.ExtKeyUsageOCSPSigning { + haveEKU = true + break + } + } + if !haveEKU { + err := fmt.Errorf("error checking delegated OCSP responder on %v OCSP response: certificate lacks the OCSP Signing EKU", method) + retErr = multierror.Append(retErr, err) + continue + } + } + } + + // While we haven't validated the signature on the OCSP response, we + // got what we presume is a definitive answer and simply changing + // methods will likely not help us in that regard. Use this status + // to return without retrying another method, when it looks definitive. + // + // We don't accept ocsp.Unknown here: presumably, we could've hit a CDN + // with static mapping of request->responses, with a default "unknown" + // handler for everything else. By retrying here, we use POST, which + // could hit a live OCSP server with fresher data than the cached CDN. + if ocspRes.Status == ocsp.Good || ocspRes.Status == ocsp.Revoked { + break + } + + // Here, we didn't have a valid response. Even though we didn't get an + // error, we should inform the user that this (valid-looking) response + // wasn't utilized. 
+ err = fmt.Errorf("fetched %v OCSP response of status %v; wanted either good (%v) or revoked (%v)", method, ocspRes.Status, ocsp.Good, ocsp.Revoked) + retErr = multierror.Append(retErr, err) } - if res.StatusCode != http.StatusOK { - return nil, nil, nil, fmt.Errorf("HTTP code is not OK. %v: %v", res.StatusCode, res.Status) - } - ocspResBytes, err = io.ReadAll(res.Body) - if err != nil { - return nil, nil, nil, err - } - ocspRes, err = ocsp.ParseResponse(ocspResBytes, issuer) - if err != nil { - return nil, nil, nil, err + + if ocspRes != nil && ocspResBytes != nil { + // Clear retErr, because we have one parseable-but-maybe-not-quite-correct + // OCSP response. + retErr = nil + ocspS = &ocspStatus{ + code: ocspSuccess, + } } - return ocspRes, ocspResBytes, &ocspStatus{ - code: ocspSuccess, - }, nil + return } // GetRevocationStatus checks the certificate revocation status for subject using issuer certificate. diff --git a/sdk/helper/ocsp/ocsp_test.go b/sdk/helper/ocsp/ocsp_test.go index 2f3f1976d2a8..892391d29bf4 100644 --- a/sdk/helper/ocsp/ocsp_test.go +++ b/sdk/helper/ocsp/ocsp_test.go @@ -8,6 +8,7 @@ import ( "crypto" "crypto/tls" "crypto/x509" + "encoding/pem" "errors" "fmt" "io" @@ -18,9 +19,16 @@ import ( "testing" "time" + "github.com/hashicorp/vault/api" + "github.com/hashicorp/vault/builtin/logical/pki" + vaulthttp "github.com/hashicorp/vault/http" + "github.com/hashicorp/vault/sdk/logical" + "github.com/hashicorp/vault/vault" + "github.com/hashicorp/go-hclog" "github.com/hashicorp/go-retryablehttp" lru "github.com/hashicorp/golang-lru" + "github.com/stretchr/testify/require" "golang.org/x/crypto/ocsp" ) @@ -424,6 +432,165 @@ func TestCanEarlyExitForOCSP(t *testing.T) { } } +func TestWithVaultPKI(t *testing.T) { + t.Parallel() + + coreConfig := &vault.CoreConfig{ + LogicalBackends: map[string]logical.Factory{ + "pki": pki.Factory, + }, + } + cluster := vault.NewTestCluster(t, coreConfig, &vault.TestClusterOptions{ + HandlerFunc: vaulthttp.Handler, + }) + + cluster.Start() + defer cluster.Cleanup() + cores := cluster.Cores + vault.TestWaitActive(t, cores[0].Core) + client := cores[0].Client + + err := client.Sys().Mount("pki", &api.MountInput{ + Type: "pki", + Config: api.MountConfigInput{ + DefaultLeaseTTL: "16h", + MaxLeaseTTL: "32h", + }, + }) + require.NoError(t, err) + + resp, err := client.Logical().Write("pki/root/generate/internal", map[string]interface{}{ + "ttl": "40h", + "common_name": "Root R1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["issuer_id"]) + rootIssuerId := resp.Data["issuer_id"].(string) + + // Set URLs pointing to the issuer. 
+ _, err = client.Logical().Write("pki/config/cluster", map[string]interface{}{ + "path": client.Address() + "/v1/pki", + "aia_path": client.Address() + "/v1/pki", + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/config/urls", map[string]interface{}{ + "enable_templating": true, + "crl_distribution_points": "{{cluster_aia_path}}/issuer/{{issuer_id}}/crl/der", + "issuing_certificates": "{{cluster_aia_path}}/issuer/{{issuer_id}}/der", + "ocsp_servers": "{{cluster_aia_path}}/ocsp", + }) + require.NoError(t, err) + + // Build an intermediate CA + resp, err = client.Logical().Write("pki/intermediate/generate/internal", map[string]interface{}{ + "common_name": "Int X1", + "key_type": "ec", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["csr"]) + intermediateCSR := resp.Data["csr"].(string) + + resp, err = client.Logical().Write("pki/root/sign-intermediate", map[string]interface{}{ + "csr": intermediateCSR, + "ttl": "20h", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + intermediateCert := resp.Data["certificate"] + + resp, err = client.Logical().Write("pki/intermediate/set-signed", map[string]interface{}{ + "certificate": intermediateCert, + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["imported_issuers"]) + rawImportedIssuers := resp.Data["imported_issuers"].([]interface{}) + require.Equal(t, len(rawImportedIssuers), 1) + importedIssuer := rawImportedIssuers[0].(string) + require.NotEmpty(t, importedIssuer) + + // Set intermediate as default. + _, err = client.Logical().Write("pki/config/issuers", map[string]interface{}{ + "default": importedIssuer, + }) + require.NoError(t, err) + + // Setup roles for root, intermediate. + _, err = client.Logical().Write("pki/roles/example-root", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + "issuer_ref": rootIssuerId, + }) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/roles/example-int", map[string]interface{}{ + "allowed_domains": "example.com", + "allow_subdomains": "true", + "max_ttl": "1h", + "key_type": "ec", + }) + require.NoError(t, err) + + // Issue certs and validate them against OCSP. 
+ for _, path := range []string{"pki/issue/example-int", "pki/issue/example-root"} { + t.Logf("Validating against path: %v", path) + resp, err = client.Logical().Write(path, map[string]interface{}{ + "common_name": "test.example.com", + "ttl": "5m", + }) + require.NoError(t, err) + require.NotNil(t, resp) + require.NotNil(t, resp.Data) + require.NotEmpty(t, resp.Data["certificate"]) + require.NotEmpty(t, resp.Data["issuing_ca"]) + require.NotEmpty(t, resp.Data["serial_number"]) + + certPEM := resp.Data["certificate"].(string) + certBlock, _ := pem.Decode([]byte(certPEM)) + require.NotNil(t, certBlock) + cert, err := x509.ParseCertificate(certBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, cert) + + issuerPEM := resp.Data["issuing_ca"].(string) + issuerBlock, _ := pem.Decode([]byte(issuerPEM)) + require.NotNil(t, issuerBlock) + issuer, err := x509.ParseCertificate(issuerBlock.Bytes) + require.NoError(t, err) + require.NotNil(t, issuer) + + serialNumber := resp.Data["serial_number"].(string) + + conf := &VerifyConfig{ + OcspFailureMode: FailOpenFalse, + ExtraCas: []*x509.Certificate{cluster.CACert}, + } + ocspClient := New(testLogFactory, 10) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.NoError(t, err) + + _, err = client.Logical().Write("pki/revoke", map[string]interface{}{ + "serial_number": serialNumber, + }) + require.NoError(t, err) + + err = ocspClient.VerifyLeafCertificate(context.Background(), cert, issuer, conf) + require.Error(t, err) + } +} + var testLogger = hclog.New(hclog.DefaultOptions) func testLogFactory() hclog.Logger { diff --git a/sdk/logical/event.pb.go b/sdk/logical/event.pb.go index 4f26233a3727..1925c6ae9870 100644 --- a/sdk/logical/event.pb.go +++ b/sdk/logical/event.pb.go @@ -9,7 +9,7 @@ package logical import ( protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" - timestamppb "google.golang.org/protobuf/types/known/timestamppb" + structpb "google.golang.org/protobuf/types/known/structpb" reflect "reflect" sync "sync" ) @@ -130,7 +130,7 @@ type EventData struct { // as a hash of other fields with sufficient uniqueness. Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` // Arbitrary non-secret data. Optional. - Metadata []byte `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` + Metadata *structpb.Struct `protobuf:"bytes,2,opt,name=metadata,proto3" json:"metadata,omitempty"` // Any IDs that the event relates to, i.e., UUIDs, paths. EntityIds []string `protobuf:"bytes,3,rep,name=entity_ids,json=entityIds,proto3" json:"entity_ids,omitempty"` // Human-readable note. 
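Note: the event.pb.go hunk above changes EventData.Metadata from raw bytes to a google.protobuf.Struct. A minimal sketch of populating the new field through structpb; the metadata key, value, and ID below are illustrative placeholders:

```
package main

import (
	"fmt"
	"log"

	"google.golang.org/protobuf/types/known/structpb"

	"github.com/hashicorp/vault/sdk/logical"
)

func main() {
	// Arbitrary non-secret metadata, now a typed Struct rather than []byte.
	metadata, err := structpb.NewStruct(map[string]interface{}{
		"current_version": "1", // placeholder key/value
	})
	if err != nil {
		log.Fatal(err)
	}

	event := &logical.EventData{
		Id:       "some-uuid", // placeholder; normally a generated UUID
		Metadata: metadata,
		Note:     "testing",
	}
	fmt.Println(event.Id, event.Metadata.Fields["current_version"].GetStringValue())
}
```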
@@ -176,7 +176,7 @@ func (x *EventData) GetId() string { return "" } -func (x *EventData) GetMetadata() []byte { +func (x *EventData) GetMetadata() *structpb.Struct { if x != nil { return x.Metadata } @@ -206,10 +206,9 @@ type EventReceived struct { Event *EventData `protobuf:"bytes,1,opt,name=event,proto3" json:"event,omitempty"` // namespace path - Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` - EventType string `protobuf:"bytes,3,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` - PluginInfo *EventPluginInfo `protobuf:"bytes,4,opt,name=plugin_info,json=pluginInfo,proto3" json:"plugin_info,omitempty"` - Timestamp *timestamppb.Timestamp `protobuf:"bytes,5,opt,name=timestamp,proto3" json:"timestamp,omitempty"` + Namespace string `protobuf:"bytes,2,opt,name=namespace,proto3" json:"namespace,omitempty"` + EventType string `protobuf:"bytes,3,opt,name=event_type,json=eventType,proto3" json:"event_type,omitempty"` + PluginInfo *EventPluginInfo `protobuf:"bytes,4,opt,name=plugin_info,json=pluginInfo,proto3" json:"plugin_info,omitempty"` } func (x *EventReceived) Reset() { @@ -272,59 +271,49 @@ func (x *EventReceived) GetPluginInfo() *EventPluginInfo { return nil } -func (x *EventReceived) GetTimestamp() *timestamppb.Timestamp { - if x != nil { - return x.Timestamp - } - return nil -} - var File_sdk_logical_event_proto protoreflect.FileDescriptor var file_sdk_logical_event_proto_rawDesc = []byte{ 0x0a, 0x17, 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x07, 0x6c, 0x6f, 0x67, 0x69, 0x63, - 0x61, 0x6c, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, - 0x6f, 0x74, 0x6f, 0x22, 0xd1, 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, - 0x5f, 0x63, 0x6c, 0x61, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, - 0x75, 0x6e, 0x74, 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, - 0x74, 0x5f, 0x61, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x0d, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, - 0x1d, 0x0a, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, - 0x0a, 0x06, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, - 0x5f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, - 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, - 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x6a, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, - 0x44, 0x61, 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x02, 0x69, 0x64, 0x12, 0x1a, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, - 0x12, 0x1d, 0x0a, 
0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, - 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, - 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, - 0x6f, 0x74, 0x65, 0x22, 0xeb, 0x01, 0x0a, 0x0d, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, - 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, - 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, - 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, - 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, - 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, - 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, - 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x6e, 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, - 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, - 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x6c, 0x75, - 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, - 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x42, 0x28, 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, - 0x68, 0x61, 0x73, 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, - 0x73, 0x64, 0x6b, 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x33, + 0x61, 0x6c, 0x1a, 0x1c, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2f, 0x73, 0x74, 0x72, 0x75, 0x63, 0x74, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x22, 0xd1, 0x01, 0x0a, 0x0f, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, + 0x49, 0x6e, 0x66, 0x6f, 0x12, 0x1f, 0x0a, 0x0b, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x63, 0x6c, + 0x61, 0x73, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x6d, 0x6f, 0x75, 0x6e, 0x74, + 0x43, 0x6c, 0x61, 0x73, 0x73, 0x12, 0x25, 0x0a, 0x0e, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x61, + 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x6d, + 0x6f, 0x75, 0x6e, 0x74, 0x41, 0x63, 0x63, 0x65, 0x73, 0x73, 0x6f, 0x72, 0x12, 0x1d, 0x0a, 0x0a, + 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x70, 0x61, 0x74, 0x68, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, + 0x52, 0x09, 0x6d, 0x6f, 0x75, 0x6e, 0x74, 0x50, 0x61, 0x74, 0x68, 0x12, 0x16, 0x0a, 0x06, 0x70, + 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0d, 0x70, 0x6c, 0x75, + 0x67, 0x69, 0x6e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 
0x22, 0x83, 0x01, 0x0a, 0x09, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x0e, 0x0a, 0x02, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x02, + 0x69, 0x64, 0x12, 0x33, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x0b, 0x32, 0x17, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x75, 0x63, 0x74, 0x52, 0x08, 0x6d, + 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x6e, 0x74, 0x69, 0x74, + 0x79, 0x5f, 0x69, 0x64, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x09, 0x52, 0x09, 0x65, 0x6e, 0x74, + 0x69, 0x74, 0x79, 0x49, 0x64, 0x73, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x18, 0x04, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x6f, 0x74, 0x65, 0x22, 0xb1, 0x01, 0x0a, 0x0d, 0x45, + 0x76, 0x65, 0x6e, 0x74, 0x52, 0x65, 0x63, 0x65, 0x69, 0x76, 0x65, 0x64, 0x12, 0x28, 0x0a, 0x05, + 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x12, 0x2e, 0x6c, 0x6f, + 0x67, 0x69, 0x63, 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x44, 0x61, 0x74, 0x61, 0x52, + 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x70, + 0x61, 0x63, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x6e, 0x61, 0x6d, 0x65, 0x73, + 0x70, 0x61, 0x63, 0x65, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, + 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x54, + 0x79, 0x70, 0x65, 0x12, 0x39, 0x0a, 0x0b, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x5f, 0x69, 0x6e, + 0x66, 0x6f, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x18, 0x2e, 0x6c, 0x6f, 0x67, 0x69, 0x63, + 0x61, 0x6c, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x50, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, + 0x66, 0x6f, 0x52, 0x0a, 0x70, 0x6c, 0x75, 0x67, 0x69, 0x6e, 0x49, 0x6e, 0x66, 0x6f, 0x42, 0x28, + 0x5a, 0x26, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x68, 0x61, 0x73, + 0x68, 0x69, 0x63, 0x6f, 0x72, 0x70, 0x2f, 0x76, 0x61, 0x75, 0x6c, 0x74, 0x2f, 0x73, 0x64, 0x6b, + 0x2f, 0x6c, 0x6f, 0x67, 0x69, 0x63, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -341,15 +330,15 @@ func file_sdk_logical_event_proto_rawDescGZIP() []byte { var file_sdk_logical_event_proto_msgTypes = make([]protoimpl.MessageInfo, 3) var file_sdk_logical_event_proto_goTypes = []interface{}{ - (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo - (*EventData)(nil), // 1: logical.EventData - (*EventReceived)(nil), // 2: logical.EventReceived - (*timestamppb.Timestamp)(nil), // 3: google.protobuf.Timestamp + (*EventPluginInfo)(nil), // 0: logical.EventPluginInfo + (*EventData)(nil), // 1: logical.EventData + (*EventReceived)(nil), // 2: logical.EventReceived + (*structpb.Struct)(nil), // 3: google.protobuf.Struct } var file_sdk_logical_event_proto_depIdxs = []int32{ - 1, // 0: logical.EventReceived.event:type_name -> logical.EventData - 0, // 1: logical.EventReceived.plugin_info:type_name -> logical.EventPluginInfo - 3, // 2: logical.EventReceived.timestamp:type_name -> google.protobuf.Timestamp + 3, // 0: logical.EventData.metadata:type_name -> google.protobuf.Struct + 1, // 1: logical.EventReceived.event:type_name -> logical.EventData + 0, // 2: logical.EventReceived.plugin_info:type_name -> logical.EventPluginInfo 3, // [3:3] is the sub-list for method output_type 3, // [3:3] is the sub-list for method input_type 3, // [3:3] is the sub-list for extension type_name diff 
--git a/sdk/logical/event.proto b/sdk/logical/event.proto index e6cc304cd178..594bcf1dde09 100644 --- a/sdk/logical/event.proto +++ b/sdk/logical/event.proto @@ -4,7 +4,7 @@ option go_package = "github.com/hashicorp/vault/sdk/logical"; package logical; -import "google/protobuf/timestamp.proto"; +import "google/protobuf/struct.proto"; // EventPluginInfo contains data related to the plugin that generated an event. message EventPluginInfo { @@ -33,7 +33,7 @@ message EventData { // as a hash of other fields with sufficient uniqueness. string id = 1; // Arbitrary non-secret data. Optional. - bytes metadata = 2; + google.protobuf.Struct metadata = 2; // Any IDs that the event relates to, i.e., UUIDs, paths. repeated string entity_ids = 3; // Human-readable note. @@ -48,5 +48,4 @@ message EventReceived { string namespace = 2; string event_type = 3; EventPluginInfo plugin_info = 4; - google.protobuf.Timestamp timestamp = 5; } diff --git a/sdk/logical/events.go b/sdk/logical/events.go index 9840a9ca47c2..e96e6d709005 100644 --- a/sdk/logical/events.go +++ b/sdk/logical/events.go @@ -7,8 +7,8 @@ import ( ) // ID is an alias to GetId() for CloudEvents compatibility. -func (x *EventData) ID() string { - return x.GetId() +func (x *EventReceived) ID() string { + return x.Event.GetId() } // NewEvent returns an event with a new, random EID. diff --git a/sdk/plugin/grpc_events.go b/sdk/plugin/grpc_events.go index 14fad9491e50..d1d9fc02634a 100644 --- a/sdk/plugin/grpc_events.go +++ b/sdk/plugin/grpc_events.go @@ -34,6 +34,10 @@ type GRPCEventsServer struct { } func (s *GRPCEventsServer) SendEvent(ctx context.Context, req *pb.SendEventRequest) (*pb.Empty, error) { + if s.impl == nil { + return &pb.Empty{}, nil + } + err := s.impl.Send(ctx, logical.EventType(req.EventType), req.Event) if err != nil { return nil, err diff --git a/serviceregistration/kubernetes/testing/testserver.go b/serviceregistration/kubernetes/testing/testserver.go index 50232a2e573e..4f406eb6871b 100644 --- a/serviceregistration/kubernetes/testing/testserver.go +++ b/serviceregistration/kubernetes/testing/testserver.go @@ -1,6 +1,7 @@ package testing import ( + _ "embed" "encoding/json" "fmt" "io/ioutil" @@ -18,15 +19,27 @@ import ( const ( ExpectedNamespace = "default" ExpectedPodName = "shell-demo" - - // File names of samples pulled from real life. - caCrtFile = "ca.crt" - respGetPod = "resp-get-pod.json" - respNotFound = "resp-not-found.json" - respUpdatePod = "resp-update-pod.json" - tokenFile = "token" ) +// Pull real-life-based testing data in from files at compile time. +// We decided to embed them in the test binary because of past issues +// with reading files that we encountered on CI workers. + +//go:embed ca.crt +var caCrt string + +//go:embed resp-get-pod.json +var getPodResponse string + +//go:embed resp-not-found.json +var notFoundResponse string + +//go:embed resp-update-pod.json +var updatePodTagsResponse string + +//go:embed token +var token string + var ( // ReturnGatewayTimeouts toggles whether the test server should return, // well, gateway timeouts... @@ -78,28 +91,6 @@ func Server(t *testing.T) (testState *State, testConf *Conf, closeFunc func()) { } } - // Read in our sample files. 
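The readFile deletions that follow are the counterpart of the //go:embed directives added above: the fixtures are compiled into the test binary, so the tests no longer depend on the CI worker's filesystem at run time. A minimal standalone sketch of the same pattern, using a hypothetical fixture path:

```go
package fixtures

import (
	_ "embed" // blank import enables package-level //go:embed directives
)

// The path is resolved relative to this source file at build time,
// so a missing fixture is a compile error rather than a runtime failure.
//
//go:embed testdata/sample.json
var sampleJSON string
```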
- token, err := readFile(tokenFile) - if err != nil { - t.Fatal(err) - } - caCrt, err := readFile(caCrtFile) - if err != nil { - t.Fatal(err) - } - notFoundResponse, err := readFile(respNotFound) - if err != nil { - t.Fatal(err) - } - getPodResponse, err := readFile(respGetPod) - if err != nil { - t.Fatal(err) - } - updatePodTagsResponse, err := readFile(respUpdatePod) - if err != nil { - t.Fatal(err) - } - // Plant our token in a place where it can be read for the config. tmpToken, err := ioutil.TempFile("", "token") if err != nil { diff --git a/shamir/shamir.go b/shamir/shamir.go index e311f501d9bc..22e3c337dcb1 100644 --- a/shamir/shamir.go +++ b/shamir/shamir.go @@ -86,31 +86,40 @@ func div(a, b uint8) uint8 { panic("divide by zero") } - log_a := logTable[a] - log_b := logTable[b] - diff := ((int(log_a) - int(log_b)) + 255) % 255 - - ret := int(expTable[diff]) + ret := int(mult(a, inverse(b))) // Ensure we return zero if a is zero but aren't subject to timing attacks ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) return uint8(ret) } +// inverse calculates the inverse of a number in GF(2^8) +func inverse(a uint8) uint8 { + b := mult(a, a) + c := mult(a, b) + b = mult(c, c) + b = mult(b, b) + c = mult(b, c) + b = mult(b, b) + b = mult(b, b) + b = mult(b, c) + b = mult(b, b) + b = mult(a, b) + + return mult(b, b) +} + // mult multiplies two numbers in GF(2^8) func mult(a, b uint8) (out uint8) { - log_a := logTable[a] - log_b := logTable[b] - sum := (int(log_a) + int(log_b)) % 255 - - ret := int(expTable[sum]) + var r uint8 = 0 + var i uint8 = 8 - // Ensure we return zero if either a or b are zero but aren't subject to - // timing attacks - ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(a, 0), 0, ret) - ret = subtle.ConstantTimeSelect(subtle.ConstantTimeByteEq(b, 0), 0, ret) + for i > 0 { + i-- + r = (-(b >> i & 1) & a) ^ (-(r >> 7) & 0x1B) ^ (r + r) + } - return uint8(ret) + return r } // add combines two numbers in GF(2^8) diff --git a/shamir/tables.go b/shamir/tables.go deleted file mode 100644 index 07ec4e5283fc..000000000000 --- a/shamir/tables.go +++ /dev/null @@ -1,79 +0,0 @@ -package shamir - -// Tables taken from http://www.samiam.org/galois.html -// They use 0xe5 (229) as the generator - -var ( - // logTable provides the log(X)/log(g) at each index X - logTable = [256]uint8{ - 0x00, 0xff, 0xc8, 0x08, 0x91, 0x10, 0xd0, 0x36, - 0x5a, 0x3e, 0xd8, 0x43, 0x99, 0x77, 0xfe, 0x18, - 0x23, 0x20, 0x07, 0x70, 0xa1, 0x6c, 0x0c, 0x7f, - 0x62, 0x8b, 0x40, 0x46, 0xc7, 0x4b, 0xe0, 0x0e, - 0xeb, 0x16, 0xe8, 0xad, 0xcf, 0xcd, 0x39, 0x53, - 0x6a, 0x27, 0x35, 0x93, 0xd4, 0x4e, 0x48, 0xc3, - 0x2b, 0x79, 0x54, 0x28, 0x09, 0x78, 0x0f, 0x21, - 0x90, 0x87, 0x14, 0x2a, 0xa9, 0x9c, 0xd6, 0x74, - 0xb4, 0x7c, 0xde, 0xed, 0xb1, 0x86, 0x76, 0xa4, - 0x98, 0xe2, 0x96, 0x8f, 0x02, 0x32, 0x1c, 0xc1, - 0x33, 0xee, 0xef, 0x81, 0xfd, 0x30, 0x5c, 0x13, - 0x9d, 0x29, 0x17, 0xc4, 0x11, 0x44, 0x8c, 0x80, - 0xf3, 0x73, 0x42, 0x1e, 0x1d, 0xb5, 0xf0, 0x12, - 0xd1, 0x5b, 0x41, 0xa2, 0xd7, 0x2c, 0xe9, 0xd5, - 0x59, 0xcb, 0x50, 0xa8, 0xdc, 0xfc, 0xf2, 0x56, - 0x72, 0xa6, 0x65, 0x2f, 0x9f, 0x9b, 0x3d, 0xba, - 0x7d, 0xc2, 0x45, 0x82, 0xa7, 0x57, 0xb6, 0xa3, - 0x7a, 0x75, 0x4f, 0xae, 0x3f, 0x37, 0x6d, 0x47, - 0x61, 0xbe, 0xab, 0xd3, 0x5f, 0xb0, 0x58, 0xaf, - 0xca, 0x5e, 0xfa, 0x85, 0xe4, 0x4d, 0x8a, 0x05, - 0xfb, 0x60, 0xb7, 0x7b, 0xb8, 0x26, 0x4a, 0x67, - 0xc6, 0x1a, 0xf8, 0x69, 0x25, 0xb3, 0xdb, 0xbd, - 0x66, 0xdd, 0xf1, 0xd2, 0xdf, 0x03, 0x8d, 0x34, - 0xd9, 0x92, 0x0d, 0x63, 0x55, 0xaa, 
0x49, 0xec, - 0xbc, 0x95, 0x3c, 0x84, 0x0b, 0xf5, 0xe6, 0xe7, - 0xe5, 0xac, 0x7e, 0x6e, 0xb9, 0xf9, 0xda, 0x8e, - 0x9a, 0xc9, 0x24, 0xe1, 0x0a, 0x15, 0x6b, 0x3a, - 0xa0, 0x51, 0xf4, 0xea, 0xb2, 0x97, 0x9e, 0x5d, - 0x22, 0x88, 0x94, 0xce, 0x19, 0x01, 0x71, 0x4c, - 0xa5, 0xe3, 0xc5, 0x31, 0xbb, 0xcc, 0x1f, 0x2d, - 0x3b, 0x52, 0x6f, 0xf6, 0x2e, 0x89, 0xf7, 0xc0, - 0x68, 0x1b, 0x64, 0x04, 0x06, 0xbf, 0x83, 0x38, - } - - // expTable provides the anti-log or exponentiation value - // for the equivalent index - expTable = [256]uint8{ - 0x01, 0xe5, 0x4c, 0xb5, 0xfb, 0x9f, 0xfc, 0x12, - 0x03, 0x34, 0xd4, 0xc4, 0x16, 0xba, 0x1f, 0x36, - 0x05, 0x5c, 0x67, 0x57, 0x3a, 0xd5, 0x21, 0x5a, - 0x0f, 0xe4, 0xa9, 0xf9, 0x4e, 0x64, 0x63, 0xee, - 0x11, 0x37, 0xe0, 0x10, 0xd2, 0xac, 0xa5, 0x29, - 0x33, 0x59, 0x3b, 0x30, 0x6d, 0xef, 0xf4, 0x7b, - 0x55, 0xeb, 0x4d, 0x50, 0xb7, 0x2a, 0x07, 0x8d, - 0xff, 0x26, 0xd7, 0xf0, 0xc2, 0x7e, 0x09, 0x8c, - 0x1a, 0x6a, 0x62, 0x0b, 0x5d, 0x82, 0x1b, 0x8f, - 0x2e, 0xbe, 0xa6, 0x1d, 0xe7, 0x9d, 0x2d, 0x8a, - 0x72, 0xd9, 0xf1, 0x27, 0x32, 0xbc, 0x77, 0x85, - 0x96, 0x70, 0x08, 0x69, 0x56, 0xdf, 0x99, 0x94, - 0xa1, 0x90, 0x18, 0xbb, 0xfa, 0x7a, 0xb0, 0xa7, - 0xf8, 0xab, 0x28, 0xd6, 0x15, 0x8e, 0xcb, 0xf2, - 0x13, 0xe6, 0x78, 0x61, 0x3f, 0x89, 0x46, 0x0d, - 0x35, 0x31, 0x88, 0xa3, 0x41, 0x80, 0xca, 0x17, - 0x5f, 0x53, 0x83, 0xfe, 0xc3, 0x9b, 0x45, 0x39, - 0xe1, 0xf5, 0x9e, 0x19, 0x5e, 0xb6, 0xcf, 0x4b, - 0x38, 0x04, 0xb9, 0x2b, 0xe2, 0xc1, 0x4a, 0xdd, - 0x48, 0x0c, 0xd0, 0x7d, 0x3d, 0x58, 0xde, 0x7c, - 0xd8, 0x14, 0x6b, 0x87, 0x47, 0xe8, 0x79, 0x84, - 0x73, 0x3c, 0xbd, 0x92, 0xc9, 0x23, 0x8b, 0x97, - 0x95, 0x44, 0xdc, 0xad, 0x40, 0x65, 0x86, 0xa2, - 0xa4, 0xcc, 0x7f, 0xec, 0xc0, 0xaf, 0x91, 0xfd, - 0xf7, 0x4f, 0x81, 0x2f, 0x5b, 0xea, 0xa8, 0x1c, - 0x02, 0xd1, 0x98, 0x71, 0xed, 0x25, 0xe3, 0x24, - 0x06, 0x68, 0xb3, 0x93, 0x2c, 0x6f, 0x3e, 0x6c, - 0x0a, 0xb8, 0xce, 0xae, 0x74, 0xb1, 0x42, 0xb4, - 0x1e, 0xd3, 0x49, 0xe9, 0x9c, 0xc8, 0xc6, 0xc7, - 0x22, 0x6e, 0xdb, 0x20, 0xbf, 0x43, 0x51, 0x52, - 0x66, 0xb2, 0x76, 0x60, 0xda, 0xc5, 0xf3, 0xf6, - 0xaa, 0xcd, 0x9a, 0xa0, 0x75, 0x54, 0x0e, 0x01, - } -) diff --git a/shamir/tables_test.go b/shamir/tables_test.go deleted file mode 100644 index 81aa983b1087..000000000000 --- a/shamir/tables_test.go +++ /dev/null @@ -1,13 +0,0 @@ -package shamir - -import "testing" - -func TestTables(t *testing.T) { - for i := 1; i < 256; i++ { - logV := logTable[i] - expV := expTable[logV] - if expV != uint8(i) { - t.Fatalf("bad: %d log: %d exp: %d", i, logV, expV) - } - } -} diff --git a/tools/tools.go b/tools/tools.go index 9a4972b73c68..5f81033df61c 100644 --- a/tools/tools.go +++ b/tools/tools.go @@ -17,6 +17,7 @@ package tools //go:generate go install google.golang.org/grpc/cmd/protoc-gen-go-grpc //go:generate go install github.com/favadi/protoc-go-inject-tag //go:generate go install github.com/golangci/revgrep/cmd/revgrep +//go:generate go install gotest.tools/gotestsum import ( _ "golang.org/x/tools/cmd/goimports" @@ -31,4 +32,6 @@ import ( _ "github.com/favadi/protoc-go-inject-tag" _ "github.com/golangci/revgrep/cmd/revgrep" + + _ "gotest.tools/gotestsum" ) diff --git a/ui/app/adapters/kubernetes/role.js b/ui/app/adapters/kubernetes/role.js index 1bb268d2e201..301ae6419888 100644 --- a/ui/app/adapters/kubernetes/role.js +++ b/ui/app/adapters/kubernetes/role.js @@ -32,7 +32,7 @@ export default class KubernetesRoleAdapter extends NamedPathAdapter { } generateCredentials(backend, data) { const generateCredentialsUrl = 
`${this.buildURL()}/${encodePath(backend)}/creds/${data.role}`; - + delete data.role; return this.ajax(generateCredentialsUrl, 'POST', { data }).then((response) => { const { lease_id, lease_duration, data } = response; diff --git a/ui/app/adapters/namespace.js b/ui/app/adapters/namespace.js index ff8eff59bf26..36d023e22e55 100644 --- a/ui/app/adapters/namespace.js +++ b/ui/app/adapters/namespace.js @@ -1,27 +1,27 @@ import ApplicationAdapter from './application'; -export default ApplicationAdapter.extend({ +export default class NamespaceAdapter extends ApplicationAdapter { pathForType() { return 'namespaces'; - }, + } urlForFindAll(modelName, snapshot) { if (snapshot.adapterOptions && snapshot.adapterOptions.forUser) { return `/${this.urlPrefix()}/internal/ui/namespaces`; } return `/${this.urlPrefix()}/namespaces?list=true`; - }, + } urlForCreateRecord(modelName, snapshot) { const id = snapshot.attr('path'); return this.buildURL(modelName, id); - }, + } createRecord(store, type, snapshot) { const id = snapshot.attr('path'); - return this._super(...arguments).then(() => { + return super.createRecord(...arguments).then(() => { return { id }; }); - }, + } findAll(store, type, sinceToken, snapshot) { if (snapshot.adapterOptions && typeof snapshot.adapterOptions.namespace !== 'undefined') { @@ -29,9 +29,9 @@ export default ApplicationAdapter.extend({ namespace: snapshot.adapterOptions.namespace, }); } - return this._super(...arguments); - }, + return super.findAll(...arguments); + } query() { return this.ajax(`/${this.urlPrefix()}/namespaces?list=true`); - }, -}); + } +} diff --git a/ui/app/app.js b/ui/app/app.js index ee85a0150fba..115474cda763 100644 --- a/ui/app/app.js +++ b/ui/app/app.js @@ -15,16 +15,7 @@ export default class App extends Application { }, replication: { dependencies: { - services: [ - 'auth', - 'flash-messages', - 'namespace', - 'replication-mode', - 'router', - 'store', - 'version', - 'wizard', - ], + services: ['auth', 'flash-messages', 'namespace', 'replication-mode', 'router', 'store', 'version'], externalRoutes: { replication: 'vault.cluster.replication.index', }, @@ -41,7 +32,6 @@ export default class App extends Application { 'router', 'store', 'version', - 'wizard', 'secret-mount-path', ], externalRoutes: { @@ -69,7 +59,6 @@ export default class App extends Application { 'secret-mount-path', 'store', 'version', - 'wizard', ], externalRoutes: { secrets: 'vault.cluster.secrets.backends', diff --git a/ui/app/components/auth-config-form/config.js b/ui/app/components/auth-config-form/config.js index 0e21139f2ca1..bb0c2bc77be4 100644 --- a/ui/app/components/auth-config-form/config.js +++ b/ui/app/components/auth-config-form/config.js @@ -23,7 +23,6 @@ const AuthConfigBase = Component.extend({ flashMessages: service(), router: service(), - wizard: service(), saveModel: task( waitFor(function* () { try { @@ -36,9 +35,6 @@ const AuthConfigBase = Component.extend({ } return; } - if (this.wizard.currentMachine === 'authentication' && this.wizard.featureState === 'config') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE'); - } this.router.transitionTo('vault.cluster.access.methods').followRedirects(); this.flashMessages.success('The configuration was saved successfully.'); }) diff --git a/ui/app/components/auth-config-form/options.js b/ui/app/components/auth-config-form/options.js index 191c9c136514..b5d5fed67d29 100644 --- a/ui/app/components/auth-config-form/options.js +++ b/ui/app/components/auth-config-form/options.js @@ -20,14 +20,13 @@ import 
{ waitFor } from '@ember/test-waiters'; export default AuthConfigComponent.extend({ flashMessages: service(), router: service(), - wizard: service(), saveModel: task( waitFor(function* () { const data = this.model.config.serialize(); data.description = this.model.description; - // token_type should not be tuneable for the token auth method, default is 'default-service' + // token_type should not be tuneable for the token auth method. if (this.model.type === 'token') { delete data.token_type; } @@ -49,9 +48,6 @@ export default AuthConfigComponent.extend({ } return; } - if (this.wizard.currentMachine === 'authentication' && this.wizard.featureState === 'config') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE'); - } this.router.transitionTo('vault.cluster.access.methods').followRedirects(); this.flashMessages.success('The configuration was saved successfully.'); }) diff --git a/ui/app/components/auth-form.js b/ui/app/components/auth-form.js index 48df029f7929..3eb77e9ea171 100644 --- a/ui/app/components/auth-form.js +++ b/ui/app/components/auth-form.js @@ -8,6 +8,7 @@ import { computed } from '@ember/object'; import { supportedAuthBackends } from 'vault/helpers/supported-auth-backends'; import { task, timeout } from 'ember-concurrency'; import { waitFor } from '@ember/test-waiters'; +import { v4 as uuidv4 } from 'uuid'; const BACKENDS = supportedAuthBackends(); @@ -307,7 +308,7 @@ export default Component.extend(DEFAULTS, { } // add nonce field for okta backend if (backend.type === 'okta') { - data.nonce = crypto.randomUUID(); + data.nonce = uuidv4(); // add a default path of okta if it doesn't exist to be used for Okta Number Challenge if (!data.path) { data.path = 'okta'; diff --git a/ui/app/components/auth-info.js b/ui/app/components/auth-info.js index 1ca7f9cef994..37430e69bebb 100644 --- a/ui/app/components/auth-info.js +++ b/ui/app/components/auth-info.js @@ -17,7 +17,6 @@ import { tracked } from '@glimmer/tracking'; */ export default class AuthInfoComponent extends Component { @service auth; - @service wizard; @service router; @tracked fakeRenew = false; @@ -36,11 +35,6 @@ export default class AuthInfoComponent extends Component { this.router.transitionTo(...arguments); } - @action - restartGuide() { - this.wizard.restartGuide(); - } - @action renewToken() { this.fakeRenew = true; diff --git a/ui/app/components/auth-jwt.js b/ui/app/components/auth-jwt.js index 41110ebbf259..724ed90c0664 100644 --- a/ui/app/components/auth-jwt.js +++ b/ui/app/components/auth-jwt.js @@ -134,7 +134,7 @@ export default Component.extend({ let { namespace, path, state, code } = oidcState; - // The namespace can be either be passed as a query paramter, or be embedded + // The namespace can either be passed as a query parameter, or be embedded // in the state param in the format `<state>,ns=<namespace>`. So if // `namespace` is empty, check for namespace in state as well.
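To make the convention in that comment concrete: the state value may carry a trailing `,ns=<namespace>` segment that is split off before the state is used. A standalone sketch (a hypothetical helper, not part of the component):

```js
// Split a combined OIDC state value such as 'st_abc123,ns=admin/dev'
// into the state token and an optional namespace path.
function parseOidcState(rawState) {
  if (rawState?.includes(',ns=')) {
    const [state, namespace] = rawState.split(',ns=');
    return { state, namespace };
  }
  return { state: rawState, namespace: '' };
}

// parseOidcState('st_abc123,ns=admin/dev') => { state: 'st_abc123', namespace: 'admin/dev' }
```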
if (namespace === '' || this.featureFlagService.managedNamespaceRoot) { @@ -171,6 +171,14 @@ export default Component.extend({ if (e && e.preventDefault) { e.preventDefault(); } + try { + await this.fetchRole.perform(this.roleName, { debounce: false }); + } catch (error) { + // this task could be cancelled if the instances in didReceiveAttrs resolve after this was started + if (error?.name !== 'TaskCancelation') { + throw error; + } + } if (!this.isOIDC || !this.role || !this.role.authUrl) { let message = this.errorMessage; if (!this.role) { @@ -182,14 +190,6 @@ export default Component.extend({ this.onError(message); return; } - try { - await this.fetchRole.perform(this.roleName, { debounce: false }); - } catch (error) { - // this task could be cancelled if the instances in didReceiveAttrs resolve after this was started - if (error?.name !== 'TaskCancelation') { - throw error; - } - } const win = this.getWindow(); const POPUP_WIDTH = 500; diff --git a/ui/app/components/configure-ssh-secret.js b/ui/app/components/configure-ssh-secret.js index 41832e10ee06..961bbe4267e8 100644 --- a/ui/app/components/configure-ssh-secret.js +++ b/ui/app/components/configure-ssh-secret.js @@ -6,20 +6,28 @@ import { action } from '@ember/object'; * * @example * ```js - * + * * ``` * * @param {string} model - ssh secret engine model * @param {Function} saveConfig - parent action which updates the configuration - * + * @param {boolean} loading - property in parent that updates depending on status of parent's action + * */ export default class ConfigureSshSecretComponent extends Component { @action - saveConfig(data, event) { + delete() { + this.args.saveConfig({ delete: true }); + } + + @action + saveConfig(event) { event.preventDefault(); - this.args.saveConfig(data); + this.args.saveConfig({ delete: false }); } } diff --git a/ui/app/components/database-connection.js b/ui/app/components/database-connection.js index c71c7ac776f1..ea32bcc77b9a 100644 --- a/ui/app/components/database-connection.js +++ b/ui/app/components/database-connection.js @@ -19,7 +19,6 @@ export default class DatabaseConnectionEdit extends Component { @service store; @service router; @service flashMessages; - @service wizard; @tracked showPasswordField = false; // used for edit mode @@ -27,13 +26,6 @@ export default class DatabaseConnectionEdit extends Component { @tracked showSaveModal = false; // used for create mode - constructor() { - super(...arguments); - if (this.wizard.featureState === 'details' || this.wizard.featureState === 'connection') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', 'database'); - } - } - rotateCredentials(backend, name) { const adapter = this.store.adapterFor('database/connection'); return adapter.rotateRootCredentials(backend, name); diff --git a/ui/app/components/database-role-edit.js b/ui/app/components/database-role-edit.js index 64ea98eacebe..70e8077faf56 100644 --- a/ui/app/components/database-role-edit.js +++ b/ui/app/components/database-role-edit.js @@ -9,17 +9,10 @@ const SHOW_ROUTE = 'vault.cluster.secrets.backend.show'; export default class DatabaseRoleEdit extends Component { @service router; @service flashMessages; - @service wizard; @service store; constructor() { super(...arguments); - if ( - this.wizard.featureState === 'displayConnection' || - this.wizard.featureState === 'displayRoleDatabase' - ) { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', 'database'); - } if (this.args.initialKey) { this.args.model.database = 
[this.args.initialKey]; } diff --git a/ui/app/components/diff-version-selector.js b/ui/app/components/diff-version-selector.js index eafdcad417e9..876a8da112f0 100644 --- a/ui/app/components/diff-version-selector.js +++ b/ui/app/components/diff-version-selector.js @@ -3,7 +3,6 @@ import Component from '@glimmer/component'; import { inject as service } from '@ember/service'; import { action } from '@ember/object'; import { tracked } from '@glimmer/tracking'; -import { htmlSafe } from '@ember/template'; /** * @module DiffVersionSelector @@ -59,10 +58,10 @@ export default class DiffVersionSelector extends Component { if (delta === undefined) { this.statesMatch = true; // params: value, replacer (all properties included), space (white space and indentation, line break, etc.) - this.visualDiff = htmlSafe(JSON.stringify(leftSideVersionData, undefined, 2)); + this.visualDiff = JSON.stringify(leftSideVersionData, undefined, 2); } else { this.statesMatch = false; - this.visualDiff = htmlSafe(jsondiffpatch.formatters.html.format(delta, rightSideVersionData)); + this.visualDiff = jsondiffpatch.formatters.html.format(delta, rightSideVersionData); } } diff --git a/ui/app/components/generate-credentials.js b/ui/app/components/generate-credentials.js index 74526b209443..7d832a1d2cd6 100644 --- a/ui/app/components/generate-credentials.js +++ b/ui/app/components/generate-credentials.js @@ -26,7 +26,6 @@ const MODEL_TYPES = { }; export default Component.extend({ - wizard: service(), store: service(), router: service(), // set on the component @@ -58,13 +57,6 @@ export default Component.extend({ this.createOrReplaceModel(); }, - didReceiveAttrs() { - this._super(); - if (this.wizard.featureState === 'displayRole') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.backendType); - } - }, - willDestroy() { if (!this.model.isDestroyed && !this.model.isDestroying) { this.model.unloadRecord(); @@ -98,17 +90,10 @@ export default Component.extend({ create() { const model = this.model; this.set('loading', true); - this.model - .save() - .catch(() => { - if (this.wizard.featureState === 'credentials') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'ERROR', this.backendType); - } - }) - .finally(() => { - model.set('hasGenerated', true); - this.set('loading', false); - }); + this.model.save().finally(() => { + model.set('hasGenerated', true); + this.set('loading', false); + }); }, codemirrorUpdated(attr, val, codemirror) { diff --git a/ui/app/components/keymgmt/distribute.js b/ui/app/components/keymgmt/distribute.js index f64620720d34..798a39435644 100644 --- a/ui/app/components/keymgmt/distribute.js +++ b/ui/app/components/keymgmt/distribute.js @@ -35,7 +35,6 @@ export default class KeymgmtDistribute extends Component { @service store; @service flashMessages; @service router; - @service wizard; @tracked keyModel; @tracked isNewKey = false; @@ -57,14 +56,6 @@ export default class KeymgmtDistribute extends Component { this.getKeyInfo(this.args.key); } this.formData.operations = []; - this.updateWizard('nextStep'); - } - - updateWizard(key) { - // wizard will pause unless we manually continue it -- verify that keymgmt tutorial is in progress - if (this.wizard[key] === 'distribute') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', 'keymgmt'); - } } get keyTypes() { @@ -198,8 +189,6 @@ export default class KeymgmtDistribute extends Component { this.store.clearDataset('keymgmt/key'); const providerModel = 
this.store.peekRecord('keymgmt/provider', provider); providerModel.fetchKeys(providerModel.keys?.meta?.currentPage || 1); - // move wizard forward if tutorial is in progress - this.updateWizard('featureState'); this.args.onClose(); }) .catch((e) => { diff --git a/ui/app/components/license-banners.js b/ui/app/components/license-banners.js index 91f4d2b15044..2839a3a29421 100644 --- a/ui/app/components/license-banners.js +++ b/ui/app/components/license-banners.js @@ -1,3 +1,11 @@ +import Component from '@glimmer/component'; +import { action } from '@ember/object'; +import { tracked } from '@glimmer/tracking'; +import { inject as service } from '@ember/service'; +import isAfter from 'date-fns/isAfter'; +import differenceInDays from 'date-fns/differenceInDays'; +import localStorage from 'vault/lib/local-storage'; + /** * @module LicenseBanners * LicenseBanners components are used to display Vault-specific license expiry messages @@ -9,11 +17,23 @@ * @param {string} expiry - RFC3339 date timestamp */ -import Component from '@glimmer/component'; -import isAfter from 'date-fns/isAfter'; -import differenceInDays from 'date-fns/differenceInDays'; - export default class LicenseBanners extends Component { + @service version; + + @tracked warningDismissed; + @tracked expiredDismissed; + + constructor() { + super(...arguments); + // do not dismiss any banners if the user has updated their version + const dismissedBanner = localStorage.getItem(`dismiss-license-banner-${this.currentVersion}`); // returns either warning or expired + this.updateDismissType(dismissedBanner); + } + + get currentVersion() { + return this.version.version; + } + get licenseExpired() { if (!this.args.expiry) return false; return isAfter(new Date(), new Date(this.args.expiry)); @@ -24,4 +44,22 @@ export default class LicenseBanners extends Component { if (!this.args.expiry) return 99; return differenceInDays(new Date(this.args.expiry), new Date()); } + + @action + dismissBanner(dismissAction) { + // if a client's version changed, their old localStorage key will still exist.
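The cleanUpStorage helper invoked on the next line is defined later in this diff (ui/app/lib/local-storage.js); in essence it drops every key sharing the prefix except the one for the running version, so stale dismissals do not accumulate across upgrades. A standalone sketch:

```js
// Remove stale prefixed keys, keeping only the key for the current version.
function cleanUpStorage(prefix, keyToKeep) {
  if (!prefix) return;
  Object.keys(window.localStorage)
    .filter((key) => key.startsWith(prefix) && key !== keyToKeep)
    .forEach((key) => window.localStorage.removeItem(key));
}

// e.g. cleanUpStorage('dismiss-license-banner', `dismiss-license-banner-${currentVersion}`);
// where currentVersion is the running Vault version (see the getter above)
```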
+ localStorage.cleanUpStorage('dismiss-license-banner', `dismiss-license-banner-${this.currentVersion}`); + // updates localStorage and then updates the template by calling updateDismissType + localStorage.setItem(`dismiss-license-banner-${this.currentVersion}`, dismissAction); + this.updateDismissType(dismissAction); + } + + updateDismissType(dismissType) { + // updates tracked properties to update template + if (dismissType === 'warning') { + this.warningDismissed = true; + } else if (dismissType === 'expired') { + this.expiredDismissed = true; + } + } } diff --git a/ui/app/components/mount-backend-form.js b/ui/app/components/mount-backend-form.js index 8d5f2d741c2c..585c91a17633 100644 --- a/ui/app/components/mount-backend-form.js +++ b/ui/app/components/mount-backend-form.js @@ -21,7 +21,6 @@ import { methods } from 'vault/helpers/mountable-auth-methods'; export default class MountBackendForm extends Component { @service store; - @service wizard; @service flashMessages; // validation related properties @@ -139,22 +138,9 @@ export default class MountBackendForm extends Component { this.args.mountModel[name] = value; } - @action - onTypeChange(path, value) { - if (path === 'type') { - this.wizard.set('componentState', value); - } - } - @action setMountType(value) { this.args.mountModel.type = value; this.checkPathChange(value); - if (value) { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.args.mountModel.type); - } else if (this.wizard.featureState === 'idle') { - // resets wizard - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'RESET', this.args.mountModel.type); - } } } diff --git a/ui/app/components/namespace-link.js b/ui/app/components/namespace-link.js index 2e8d65a01922..65a7390aef60 100644 --- a/ui/app/components/namespace-link.js +++ b/ui/app/components/namespace-link.js @@ -11,10 +11,13 @@ export default Component.extend({ //public api targetNamespace: null, showLastSegment: false, + // set to true if targetNamespace is passed in unmodified + // otherwise, this assumes it is parsed as in namespace-picker + unparsed: false, - normalizedNamespace: computed('targetNamespace', function () { - const ns = this.targetNamespace; - return (ns || '').replace(/\.+/g, '/').replace(/☃/g, '.'); + normalizedNamespace: computed('targetNamespace', 'unparsed', function () { + const ns = this.targetNamespace || ''; + return this.unparsed ? 
ns : ns.replace(/\.+/g, '/').replace(/☃/g, '.'); }), namespaceDisplay: computed('normalizedNamespace', 'showLastSegment', function () { diff --git a/ui/app/components/role-edit.js b/ui/app/components/role-edit.js index 861e3d7d125a..aead8aaeabec 100644 --- a/ui/app/components/role-edit.js +++ b/ui/app/components/role-edit.js @@ -12,7 +12,6 @@ const SHOW_ROUTE = 'vault.cluster.secrets.backend.show'; export default Component.extend(FocusOnInsertMixin, { router: service(), - wizard: service(), mode: null, emptyData: '{\n}', @@ -21,19 +20,6 @@ export default Component.extend(FocusOnInsertMixin, { model: null, requestInFlight: or('model.isLoading', 'model.isReloading', 'model.isSaving'), - didReceiveAttrs() { - this._super(...arguments); - if ( - (this.wizard.featureState === 'details' && this.mode === 'create') || - (this.wizard.featureState === 'role' && this.mode === 'show') - ) { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', this.backendType); - } - if (this.wizard.featureState === 'displayRole') { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'NOOP', this.backendType); - } - }, - willDestroyElement() { this._super(...arguments); if (this.model && this.model.isError) { @@ -69,9 +55,6 @@ export default Component.extend(FocusOnInsertMixin, { const model = this.model; return model[method]().then(() => { if (!model.isError) { - if (this.wizard.featureState === 'role') { - this.wizard.transitionFeatureMachine('role', 'CONTINUE', this.backendType); - } successCallback(model); } }); diff --git a/ui/app/components/secret-create-or-update.js b/ui/app/components/secret-create-or-update.js index 634fb4b05ffe..7e93fd0eb82c 100644 --- a/ui/app/components/secret-create-or-update.js +++ b/ui/app/components/secret-create-or-update.js @@ -48,7 +48,6 @@ export default class SecretCreateOrUpdate extends Component { @service controlGroup; @service router; @service store; - @service wizard; @action setup(elem, [secretData, model, mode]) { @@ -164,9 +163,6 @@ export default class SecretCreateOrUpdate extends Component { }); } saveComplete(callback, key) { - if (this.wizard.featureState === 'secret') { - this.wizard.transitionFeatureMachine('secret', 'CONTINUE'); - } callback(key); } transitionToRoute() { diff --git a/ui/app/components/secret-edit.js b/ui/app/components/secret-edit.js index 2a9ab3a0efe8..1818bbadec84 100644 --- a/ui/app/components/secret-edit.js +++ b/ui/app/components/secret-edit.js @@ -27,7 +27,6 @@ import { maybeQueryRecord } from 'vault/macros/maybe-query-record'; import { alias, or } from '@ember/object/computed'; export default class SecretEdit extends Component { - @service wizard; @service store; @tracked secretData = null; @@ -43,10 +42,6 @@ export default class SecretEdit extends Component { } this.secretData = KVObject.create({ content: [] }).fromJSON(model.secretData); this.codemirrorString = this.secretData.toJSONString(); - if (this.wizard.featureState === 'details' && this.args.mode === 'create') { - const engine = model.backend.includes('kv') ? 
'kv' : model.backend; - this.wizard.transitionFeatureMachine('details', 'CONTINUE', engine); - } } @maybeQueryRecord( diff --git a/ui/app/components/splash-page.js b/ui/app/components/splash-page.js index 8a1d4f5461cd..e5560f76dcf5 100644 --- a/ui/app/components/splash-page.js +++ b/ui/app/components/splash-page.js @@ -9,7 +9,7 @@ * content here * { if (!key.isError) { - if (this.wizard.featureState === 'secret') { - this.wizard.transitionFeatureMachine('secret', 'CONTINUE'); - } else { - if (this.wizard.featureState === 'encryption') { - this.wizard.transitionFeatureMachine('encryption', 'CONTINUE', 'transit'); - } - } successCallback(key); } }); diff --git a/ui/app/components/wizard-content.js b/ui/app/components/wizard-content.js index 92fae5c9122e..f925d9cd07f8 100644 --- a/ui/app/components/wizard-content.js +++ b/ui/app/components/wizard-content.js @@ -3,7 +3,6 @@ import { inject as service } from '@ember/service'; import Component from '@ember/component'; import { computed } from '@ember/object'; import { FEATURE_MACHINE_STEPS, INIT_STEPS } from 'vault/helpers/wizard-constants'; -import { htmlSafe } from '@ember/template'; export default Component.extend({ wizard: service(), @@ -84,25 +83,25 @@ export default Component.extend({ const bar = []; if (this.currentTutorialProgress) { bar.push({ - style: htmlSafe(`width:${this.currentTutorialProgress.percentage}%;`), + style: `width:${this.currentTutorialProgress.percentage}%;`, completed: false, showIcon: true, }); } else { if (this.currentFeatureProgress) { this.completedFeatures.forEach((feature) => { - bar.push({ style: htmlSafe('width:100%;'), completed: true, feature: feature, showIcon: true }); + bar.push({ style: 'width:100%;', completed: true, feature: feature, showIcon: true }); }); this.wizard.featureList.forEach((feature) => { if (feature === this.currentMachine) { bar.push({ - style: htmlSafe(`width:${this.currentFeatureProgress.percentage}%;`), + style: `width:${this.currentFeatureProgress.percentage}%;`, completed: this.currentFeatureProgress.percentage == 100 ? 
true : false, feature: feature, showIcon: true, }); } else { - bar.push({ style: htmlSafe('width:0%;'), completed: false, feature: feature, showIcon: true }); + bar.push({ style: 'width:0%;', completed: false, feature: feature, showIcon: true }); } }); } diff --git a/ui/app/components/wizard/features-selection.js b/ui/app/components/wizard/features-selection.js index b44f5831b4cb..9fadb6d5243f 100644 --- a/ui/app/components/wizard/features-selection.js +++ b/ui/app/components/wizard/features-selection.js @@ -3,7 +3,6 @@ import { inject as service } from '@ember/service'; import Component from '@ember/component'; import { computed } from '@ember/object'; import { FEATURE_MACHINE_TIME } from 'vault/helpers/wizard-constants'; -import { htmlSafe } from '@ember/template'; export default Component.extend({ wizard: service(), @@ -50,10 +49,10 @@ export default Component.extend({ }), selectProgress: computed('selectedFeatures', function () { let bar = this.selectedFeatures.map((feature) => { - return { style: htmlSafe('width:0%;'), completed: false, showIcon: true, feature: feature }; + return { style: 'width:0%;', completed: false, showIcon: true, feature: feature }; }); if (bar.length === 0) { - bar = [{ style: htmlSafe('width:0%;'), showIcon: false }]; + bar = [{ style: 'width:0%;', showIcon: false }]; } return bar; }), diff --git a/ui/app/controllers/vault/cluster/init.js b/ui/app/controllers/vault/cluster/init.js index 9c4439bfa6af..4707bb071d88 100644 --- a/ui/app/controllers/vault/cluster/init.js +++ b/ui/app/controllers/vault/cluster/init.js @@ -1,5 +1,4 @@ import { computed } from '@ember/object'; -import { inject as service } from '@ember/service'; import Controller from '@ember/controller'; const DEFAULTS = { @@ -12,8 +11,6 @@ const DEFAULTS = { }; export default Controller.extend(DEFAULTS, { - wizard: service(), - reset() { this.setProperties(DEFAULTS); }, @@ -22,8 +19,6 @@ export default Controller.extend(DEFAULTS, { this.set('loading', false); this.set('keyData', resp); this.model.reload(); - this.wizard.set('initEvent', 'SAVE'); - this.wizard.transitionTutorialMachine(this.wizard.currentState, 'TOSAVE'); }, initError(e) { diff --git a/ui/app/controllers/vault/cluster/policies/index.js b/ui/app/controllers/vault/cluster/policies/index.js index b75e2703a92a..e8d617f755e2 100644 --- a/ui/app/controllers/vault/cluster/policies/index.js +++ b/ui/app/controllers/vault/cluster/policies/index.js @@ -4,7 +4,6 @@ import Controller from '@ember/controller'; export default Controller.extend({ flashMessages: service(), - wizard: service(), queryParams: { page: 'page', @@ -58,9 +57,6 @@ export default Controller.extend({ // this will clear the dataset cache on the store this.send('reload'); flash.success(`${policyType.toUpperCase()} policy "${name}" was successfully deleted.`); - if (this.wizard.featureState === 'delete') { - this.wizard.transitionFeatureMachine('delete', 'CONTINUE', policyType); - } }) .catch((e) => { const errors = e.errors ? 
e.errors.join('') : e.message; diff --git a/ui/app/controllers/vault/cluster/policy/edit.js b/ui/app/controllers/vault/cluster/policy/edit.js index 927e70ea44ef..dd5deff9bfcc 100644 --- a/ui/app/controllers/vault/cluster/policy/edit.js +++ b/ui/app/controllers/vault/cluster/policy/edit.js @@ -5,7 +5,6 @@ import { inject as service } from '@ember/service'; export default class PolicyEditController extends Controller { @service router; @service flashMessages; - @service wizard; @action async deletePolicy() { @@ -14,9 +13,6 @@ export default class PolicyEditController extends Controller { await this.model.destroyRecord(); this.flashMessages.success(`${policyType.toUpperCase()} policy "${name}" was successfully deleted.`); this.router.transitionTo('vault.cluster.policies', policyType); - if (this.wizard.featureState === 'delete') { - this.wizard.transitionFeatureMachine('delete', 'CONTINUE', policyType); - } } catch (error) { this.model.rollbackAttributes(); const errors = error.errors ? error.errors.join('. ') : error.message; diff --git a/ui/app/controllers/vault/cluster/settings/auth/enable.js b/ui/app/controllers/vault/cluster/settings/auth/enable.js index d0e02c950797..da57dddab4b5 100644 --- a/ui/app/controllers/vault/cluster/settings/auth/enable.js +++ b/ui/app/controllers/vault/cluster/settings/auth/enable.js @@ -1,11 +1,8 @@ -import { inject as service } from '@ember/service'; import Controller from '@ember/controller'; export default Controller.extend({ - wizard: service(), actions: { onMountSuccess: function (type, path) { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', type); const transition = this.transitionToRoute('vault.cluster.settings.auth.configure', path); return transition.followRedirects(); }, diff --git a/ui/app/controllers/vault/cluster/settings/configure-secret-backend.js b/ui/app/controllers/vault/cluster/settings/configure-secret-backend.js index 402eb873b26b..69829cffdb05 100644 --- a/ui/app/controllers/vault/cluster/settings/configure-secret-backend.js +++ b/ui/app/controllers/vault/cluster/settings/configure-secret-backend.js @@ -31,7 +31,6 @@ export default Controller.extend(CONFIG_ATTRS, { this.model .saveCA({ isDelete }) .then(() => { - this.set('loading', false); this.send('refreshRoute'); this.set('configured', !isDelete); if (isDelete) { @@ -43,6 +42,9 @@ export default Controller.extend(CONFIG_ATTRS, { .catch((error) => { const errorMessage = error.errors ? error.errors.join('. 
') : error; this.flashMessages.danger(errorMessage); + }) + .finally(() => { + this.set('loading', false); }); } }, diff --git a/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js b/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js index 4b2c306c36e5..0991ac9f33af 100644 --- a/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js +++ b/ui/app/controllers/vault/cluster/settings/mount-secret-backend.js @@ -7,7 +7,6 @@ import { action } from '@ember/object'; const SUPPORTED_BACKENDS = supportedSecretBackends(); export default class MountSecretBackendController extends Controller { - @service wizard; @service router; @action @@ -27,8 +26,6 @@ export default class MountSecretBackendController extends Controller { } else { transition = this.router.transitionTo('vault.cluster.secrets.backends'); } - return transition.followRedirects().then(() => { - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', type); - }); + return transition.followRedirects(); } } diff --git a/ui/app/controllers/vault/cluster/unseal.js b/ui/app/controllers/vault/cluster/unseal.js index 8d1e76adbdc0..ea5d4a8ce6a2 100644 --- a/ui/app/controllers/vault/cluster/unseal.js +++ b/ui/app/controllers/vault/cluster/unseal.js @@ -1,22 +1,15 @@ -import { inject as service } from '@ember/service'; import Controller from '@ember/controller'; export default Controller.extend({ - wizard: service(), showLicenseError: false, actions: { - transitionToCluster(resp) { + transitionToCluster() { return this.model.reload().then(() => { - this.wizard.transitionTutorialMachine(this.wizard.currentState, 'CONTINUE', resp); return this.transitionToRoute('vault.cluster', this.model.name); }); }, - setUnsealState(resp) { - this.wizard.set('componentState', resp); - }, - isUnsealed(data) { return data.sealed === false; }, diff --git a/ui/app/initializers/ember-data-identifiers.js b/ui/app/initializers/ember-data-identifiers.js new file mode 100644 index 000000000000..c22c2af7fe63 --- /dev/null +++ b/ui/app/initializers/ember-data-identifiers.js @@ -0,0 +1,26 @@ +import { setIdentifierGenerationMethod } from '@ember-data/store'; +import { dasherize } from '@ember/string'; +import { v4 as uuidv4 } from 'uuid'; + +export function initialize() { + // see this GH issue for more information https://github.com/emberjs/data/issues/8106 + // Ember Data uses the uuidv4 library to generate ids, which relies on the crypto API that is not available in insecure contexts + // the suggested polyfill was added in 4.6.2 so until we upgrade we need to define our own id generation method + // https://api.emberjs.com/ember-data/4.5/classes/IdentifierCache/methods/getOrCreateRecordIdentifier?anchor=getOrCreateRecordIdentifier + // the uuid library was brought in to replace other usages of crypto in the app so it is safe to use in insecure contexts + // adapted from defaultGenerationMethod -- https://github.com/emberjs/data/blob/v4.5.0/packages/store/addon/-private/identifier-cache.ts#LL82-L94C2 + setIdentifierGenerationMethod((data) => { + if (data.lid) { + return data.lid; + } + if (data.id) { + return `@lid:${dasherize(data.type)}-${data.id}`; + } + return uuidv4(); + }); +} + +export default { + name: 'ember-data-identifiers', + initialize, +}; diff --git a/ui/app/lib/local-storage.js b/ui/app/lib/local-storage.js index 86556835c7f8..5447118e3281 100644 --- a/ui/app/lib/local-storage.js +++ b/ui/app/lib/local-storage.js @@ -15,4 +15,14 @@ export default { keys() { return Object.keys(window.localStorage);
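For reference, the lookup order the new initializer registers, extracted as a plain function (crypto.randomUUID is only defined in secure contexts, which is why the uuid package is used instead):

```js
import { dasherize } from '@ember/string';
import { v4 as uuidv4 } from 'uuid';

// Mirrors the method passed to setIdentifierGenerationMethod above:
// prefer an existing lid, then derive a stable key from type + id,
// and only mint a random v4 UUID as a last resort.
export function generateIdentifier(data) {
  if (data.lid) {
    return data.lid;
  }
  if (data.id) {
    return `@lid:${dasherize(data.type)}-${data.id}`;
  }
  return uuidv4();
}

// generateIdentifier({ type: 'secretEngine', id: 'kv' }) => '@lid:secret-engine-kv'
```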
}, + + cleanUpStorage(string, keyToKeep) { + if (!string) return; + const relevantKeys = this.keys().filter((str) => str.startsWith(string)); + relevantKeys?.forEach((key) => { + if (key !== keyToKeep) { + localStorage.removeItem(key); + } + }); + }, }; diff --git a/ui/app/models/mount-config.js b/ui/app/models/mount-config.js index 7ef0a9567d42..b2de642e946e 100644 --- a/ui/app/models/mount-config.js +++ b/ui/app/models/mount-config.js @@ -42,12 +42,19 @@ export default class MountConfigModel extends Model { }) passthroughRequestHeaders; + @attr({ + label: 'Allowed response headers', + helpText: 'Headers to allow, allowing a plugin to include them in the response.', + editType: 'stringArray', + }) + allowedResponseHeaders; + @attr('string', { label: 'Token Type', helpText: - "The type of token that should be generated via this role. Can be `service`, `batch`, or `default` to use the mount's default (which unless changed will be `service` tokens).", - possibleValues: ['default', 'batch', 'service'], - defaultFormValue: 'default', + 'The type of token that should be generated via this role. For `default-service` and `default-batch`, service and batch tokens will be issued respectively, unless the auth method explicitly requests a different type.', + possibleValues: ['default-service', 'default-batch', 'batch', 'service'], + noDefault: true, }) tokenType; } diff --git a/ui/app/models/pki/certificate/generate.js b/ui/app/models/pki/certificate/generate.js index dafefde4057c..d508e7eb52ac 100644 --- a/ui/app/models/pki/certificate/generate.js +++ b/ui/app/models/pki/certificate/generate.js @@ -1,6 +1,5 @@ import { attr } from '@ember-data/model'; import { withFormFields } from 'vault/decorators/model-form-fields'; -import { withModelValidations } from 'vault/decorators/model-validations'; import PkiCertificateBaseModel from './base'; const generateFromRole = [ @@ -21,11 +20,6 @@ const generateFromRole = [ ], }, ]; -const validations = { - commonName: [{ type: 'presence', message: 'Common name is required.' }], -}; - -@withModelValidations(validations) @withFormFields(null, generateFromRole) export default class PkiCertificateGenerateModel extends PkiCertificateBaseModel { getHelpUrl(backend) { diff --git a/ui/app/models/role-jwt.js b/ui/app/models/role-jwt.js index 6de28f121204..d2de124f3a28 100644 --- a/ui/app/models/role-jwt.js +++ b/ui/app/models/role-jwt.js @@ -1,31 +1,30 @@ import Model, { attr } from '@ember-data/model'; -import { computed } from '@ember/object'; import parseURL from 'core/utils/parse-url'; const DOMAIN_STRINGS = { - github: 'GitHub', - gitlab: 'GitLab', - google: 'Google', - ping: 'Ping', - okta: 'Okta', - auth0: 'Auth0', + 'github.com': 'GitHub', + 'gitlab.com': 'GitLab', + 'google.com': 'Google', + 'ping.com': 'Ping', + 'okta.com': 'Okta', + 'auth0.com': 'Auth0', }; const PROVIDER_WITH_LOGO = ['GitLab', 'Google', 'Auth0']; export { DOMAIN_STRINGS, PROVIDER_WITH_LOGO }; -export default Model.extend({ - authUrl: attr('string'), +export default class RoleJwtModel extends Model { + @attr('string') authUrl; - providerName: computed('authUrl', function () { + get providerName() { const { hostname } = parseURL(this.authUrl); const firstMatch = Object.keys(DOMAIN_STRINGS).find((name) => hostname.includes(name)); return DOMAIN_STRINGS[firstMatch] || null; - }), + } - providerButtonComponent: computed('providerName', function () { + get providerButtonComponent() { const { providerName } = this; return PROVIDER_WITH_LOGO.includes(providerName) ?
`auth-button-${providerName.toLowerCase()}` : null; - }), -}); + } +} diff --git a/ui/app/models/secret-engine.js b/ui/app/models/secret-engine.js index 640296c8f873..b0251c46a32b 100644 --- a/ui/app/models/secret-engine.js +++ b/ui/app/models/secret-engine.js @@ -83,7 +83,9 @@ export default SecretEngineModel.extend({ const fields = ['type', 'path', 'description', 'accessor', 'local', 'sealWrap']; // no ttl options for keymgmt const ttl = type !== 'keymgmt' ? 'defaultLeaseTtl,maxLeaseTtl,' : ''; - fields.push(`config.{${ttl}auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}`); + fields.push( + `config.{${ttl}auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}` + ); if (type === 'kv' || type === 'generic') { fields.push('version'); } @@ -105,14 +107,14 @@ export default SecretEngineModel.extend({ optionFields = [ 'version', ...CORE_OPTIONS, - `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}`, + `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}`, ]; break; case 'generic': optionFields = [ 'version', ...CORE_OPTIONS, - `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}`, + `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}`, ]; break; case 'database': @@ -120,21 +122,21 @@ export default SecretEngineModel.extend({ defaultFields = ['path', 'config.{defaultLeaseTtl}', 'config.{maxLeaseTtl}']; optionFields = [ ...CORE_OPTIONS, - 'config.{auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + 'config.{auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}', ]; break; case 'keymgmt': // no ttl options for keymgmt optionFields = [ ...CORE_OPTIONS, - 'config.{auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}', + 'config.{auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}', ]; break; default: defaultFields = ['path']; optionFields = [ ...CORE_OPTIONS, - `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders}`, + `config.{defaultLeaseTtl,maxLeaseTtl,auditNonHmacRequestKeys,auditNonHmacResponseKeys,passthroughRequestHeaders,allowedResponseHeaders}`, ]; break; } diff --git a/ui/app/routes/application.js b/ui/app/routes/application.js index 1f8c8ec09852..c5baa0a055b3 100644 --- a/ui/app/routes/application.js +++ b/ui/app/routes/application.js @@ -1,12 +1,10 @@ import { inject as service } from '@ember/service'; -import { next } from '@ember/runloop'; import Route from '@ember/routing/route'; import ControlGroupError from 'vault/lib/control-group-error'; export default Route.extend({ controlGroup: service(), routing: service('router'), - wizard: service(), namespaceService: service('namespace'), featureFlagService: service('featureFlag'), @@ -60,28 +58,6 @@ export default Route.extend({ return true; }, - didTransition() { - const wizard = this.wizard; - - if (wizard.get('currentState') !== 'active.feature') { - return true; - } - next(() => { - const applicationURL = this.routing.currentURL; - const activeRoute = this.routing.currentRouteName; - - if (this.wizard.setURLAfterTransition) { - this.set('wizard.setURLAfterTransition', false); - 
this.set('wizard.expectedURL', applicationURL); - this.set('wizard.expectedRouteName', activeRoute); - } - const expectedRouteName = this.wizard.expectedRouteName; - if (this.routing.isActive(expectedRouteName) === false) { - wizard.transitionTutorialMachine(wizard.get('currentState'), 'PAUSE'); - } - }); - return true; - }, }, async beforeModel() { diff --git a/ui/app/routes/vault/cluster/access/method/item.js b/ui/app/routes/vault/cluster/access/method/item.js index 9371a9a7e501..f0416d8ab119 100644 --- a/ui/app/routes/vault/cluster/access/method/item.js +++ b/ui/app/routes/vault/cluster/access/method/item.js @@ -3,7 +3,6 @@ import Route from '@ember/routing/route'; import { singularize } from 'ember-inflector'; export default Route.extend({ - wizard: service(), pathHelp: service('path-help'), beforeModel() { diff --git a/ui/app/routes/vault/cluster/access/method/item/list.js b/ui/app/routes/vault/cluster/access/method/item/list.js index d2e4315e891d..8ae1ef61359d 100644 --- a/ui/app/routes/vault/cluster/access/method/item/list.js +++ b/ui/app/routes/vault/cluster/access/method/item/list.js @@ -5,7 +5,6 @@ import ListRoute from 'vault/mixins/list-route'; export default Route.extend(ListRoute, { store: service(), - wizard: service(), pathHelp: service('path-help'), getMethodAndModelInfo() { diff --git a/ui/app/routes/vault/cluster/access/method/section.js b/ui/app/routes/vault/cluster/access/method/section.js index 904f2f5190a2..5a5a29c25ccb 100644 --- a/ui/app/routes/vault/cluster/access/method/section.js +++ b/ui/app/routes/vault/cluster/access/method/section.js @@ -1,11 +1,8 @@ import AdapterError from '@ember-data/adapter/error'; import { set } from '@ember/object'; -import { inject as service } from '@ember/service'; import Route from '@ember/routing/route'; export default Route.extend({ - wizard: service(), - model(params) { const { section_name: section } = params; if (section !== 'configuration') { @@ -13,9 +10,7 @@ export default Route.extend({ set(error, 'httpStatus', 404); throw error; } - const backend = this.modelFor('vault.cluster.access.method'); - this.wizard.transitionFeatureMachine(this.wizard.featureState, 'DETAILS', backend.type); - return backend; + return this.modelFor('vault.cluster.access.method'); }, setupController(controller) { diff --git a/ui/app/routes/vault/cluster/auth.js b/ui/app/routes/vault/cluster/auth.js index 1c24097978ff..7bf2e80f933c 100644 --- a/ui/app/routes/vault/cluster/auth.js +++ b/ui/app/routes/vault/cluster/auth.js @@ -10,7 +10,6 @@ export default ClusterRouteBase.extend({ }, flashMessages: service(), version: service(), - wizard: service(), beforeModel() { return this._super().then(() => { return this.version.fetchFeatures(); @@ -30,15 +29,4 @@ export default ClusterRouteBase.extend({ this.flashMessages.stickyInfo(config.welcomeMessage); } }, - activate() { - this.wizard.set('initEvent', 'LOGIN'); - this.wizard.transitionTutorialMachine(this.wizard.currentState, 'TOLOGIN'); - }, - actions: { - willTransition(transition) { - if (transition.targetName !== this.routeName) { - this.wizard.transitionTutorialMachine(this.wizard.currentState, 'INITDONE'); - } - }, - }, }); diff --git a/ui/app/routes/vault/cluster/init.js b/ui/app/routes/vault/cluster/init.js index 820de51eda0d..1495fc503fc3 100644 --- a/ui/app/routes/vault/cluster/init.js +++ b/ui/app/routes/vault/cluster/init.js @@ -1,12 +1,3 @@ -import { inject as service } from '@ember/service'; import ClusterRoute from './cluster-route-base'; -export default ClusterRoute.extend({ - wizard: 
diff --git a/ui/app/routes/vault/cluster/init.js b/ui/app/routes/vault/cluster/init.js
index 820de51eda0d..1495fc503fc3 100644
--- a/ui/app/routes/vault/cluster/init.js
+++ b/ui/app/routes/vault/cluster/init.js
@@ -1,12 +1,3 @@
-import { inject as service } from '@ember/service';
 import ClusterRoute from './cluster-route-base';
-export default ClusterRoute.extend({
-  wizard: service(),
-
-  activate() {
-    // always start from idle instead of using the current state
-    this.wizard.transitionTutorialMachine('idle', 'INIT');
-    this.wizard.set('initEvent', 'START');
-  },
-});
+export default ClusterRoute.extend({});
diff --git a/ui/app/routes/vault/cluster/oidc-callback.js b/ui/app/routes/vault/cluster/oidc-callback.js
index c7fa09918c81..61219c658d09 100644
--- a/ui/app/routes/vault/cluster/oidc-callback.js
+++ b/ui/app/routes/vault/cluster/oidc-callback.js
@@ -8,10 +8,18 @@ export default Route.extend({
   afterModel() {
     let { auth_path: path, code, state } = this.paramsFor(this.routeName);
     let { namespaceQueryParam: namespace } = this.paramsFor('vault.cluster');
-    // only replace namespace param from cluster if state has a namespace
+    // namespace from state takes precedence over the cluster's ns
     if (state?.includes(',ns=')) {
       [state, namespace] = state.split(',ns=');
     }
+    // some SSO providers do not return a url-encoded state param
+    // check for namespace using URLSearchParams instead of paramsFor
+    const queryString = decodeURIComponent(window.location.search);
+    const urlParams = new URLSearchParams(queryString);
+    const checkState = urlParams.get('state');
+    if (checkState?.includes(',ns=')) {
+      [state, namespace] = checkState.split(',ns=');
+    }
     path = window.decodeURIComponent(path);
     const source = 'oidc-callback'; // required by event listener in auth-jwt component
     const queryParams = { source, path: path || '', code: code || '', state: state || '' };
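For context on the fallback above: the hunk's own split shows that `state` can carry a namespace suffix of the form `,ns=<namespace>`. When a provider echoes `state` back without url-encoding it, the value seen through `paramsFor` may not survive intact, so the route re-reads the raw query string. A sketch with made-up values:

```
// Made-up state value, for illustration: '<nonce>,ns=<namespace>'.
let namespace = 'default'; // what the cluster route would otherwise supply
let state = new URLSearchParams(window.location.search).get('state');
// suppose state === 'st_yOarDguU,ns=admin/child'
if (state?.includes(',ns=')) {
  [state, namespace] = state.split(',ns=');
}
// state === 'st_yOarDguU', namespace === 'admin/child'
```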
diff --git a/ui/app/routes/vault/cluster/policies/create.js b/ui/app/routes/vault/cluster/policies/create.js
index 66f7813dc32b..c4256b05260b 100644
--- a/ui/app/routes/vault/cluster/policies/create.js
+++ b/ui/app/routes/vault/cluster/policies/create.js
@@ -6,17 +6,9 @@ import UnsavedModelRoute from 'vault/mixins/unsaved-model-route';
 export default Route.extend(UnloadModelRoute, UnsavedModelRoute, {
   store: service(),
   version: service(),
-  wizard: service(),
   model() {
     const policyType = this.policyType();
-    if (
-      policyType === 'acl' &&
-      this.wizard.currentMachine === 'policies' &&
-      this.wizard.featureState === 'idle'
-    ) {
-      this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE');
-    }
     if (!this.version.hasSentinel && policyType !== 'acl') {
       return this.transitionTo('vault.cluster.policies', policyType);
     }
diff --git a/ui/app/routes/vault/cluster/policies/index.js b/ui/app/routes/vault/cluster/policies/index.js
index ce4cb27e988a..7650e8bc8336 100644
--- a/ui/app/routes/vault/cluster/policies/index.js
+++ b/ui/app/routes/vault/cluster/policies/index.js
@@ -6,13 +6,6 @@ import ListRoute from 'core/mixins/list-route';
 export default Route.extend(ClusterRoute, ListRoute, {
   store: service(),
   version: service(),
-  wizard: service(),
-
-  activate() {
-    if (this.wizard.featureState === 'details') {
-      this.wizard.transitionFeatureMachine('details', 'CONTINUE', this.policyType());
-    }
-  },
   shouldReturnEmptyModel(policyType, version) {
     return policyType !== 'acl' && (version.get('isOSS') || !version.get('hasSentinel'));
diff --git a/ui/app/routes/vault/cluster/policy/edit.js b/ui/app/routes/vault/cluster/policy/edit.js
index 4b22d1d2f72e..b5a186dd986a 100644
--- a/ui/app/routes/vault/cluster/policy/edit.js
+++ b/ui/app/routes/vault/cluster/policy/edit.js
@@ -1,13 +1,4 @@
 import UnsavedModelRoute from 'vault/mixins/unsaved-model-route';
 import ShowRoute from './show';
-import { inject as service } from '@ember/service';
-export default ShowRoute.extend(UnsavedModelRoute, {
-  wizard: service(),
-
-  activate() {
-    if (this.wizard.featureState === 'details') {
-      this.wizard.transitionFeatureMachine('details', 'CONTINUE', this.policyType());
-    }
-  },
-});
+export default ShowRoute.extend(UnsavedModelRoute, {});
diff --git a/ui/app/routes/vault/cluster/policy/show.js b/ui/app/routes/vault/cluster/policy/show.js
index 89c27a2b2581..e6d1e9ac2b68 100644
--- a/ui/app/routes/vault/cluster/policy/show.js
+++ b/ui/app/routes/vault/cluster/policy/show.js
@@ -5,13 +5,6 @@ import { inject as service } from '@ember/service';
 export default Route.extend(UnloadModelRoute, {
   store: service(),
-  wizard: service(),
-
-  activate() {
-    if (this.wizard.featureState === 'create') {
-      this.wizard.transitionFeatureMachine('create', 'CONTINUE', this.policyType());
-    }
-  },
   beforeModel() {
     const params = this.paramsFor(this.routeName);
diff --git a/ui/app/routes/vault/cluster/secrets/backend/configuration.js b/ui/app/routes/vault/cluster/secrets/backend/configuration.js
index 446be4b4477c..a2f32af08834 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/configuration.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/configuration.js
@@ -2,13 +2,9 @@ import { inject as service } from '@ember/service';
 import Route from '@ember/routing/route';
 export default Route.extend({
-  wizard: service(),
   store: service(),
   async model() {
     const backend = this.modelFor('vault.cluster.secrets.backend');
-    if (this.wizard.featureState === 'list') {
-      this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', backend.get('type'));
-    }
     if (backend.isV2KV) {
       const canRead = await this.store
         .findRecord('capabilities', `${backend.id}/config`)
diff --git a/ui/app/routes/vault/cluster/secrets/backend/create-root.js b/ui/app/routes/vault/cluster/secrets/backend/create-root.js
index 5041d60fcec1..ebaa2585b2fc 100644
--- a/ui/app/routes/vault/cluster/secrets/backend/create-root.js
+++ b/ui/app/routes/vault/cluster/secrets/backend/create-root.js
@@ -29,7 +29,6 @@ const transformModel = (queryParams) => {
 export default EditBase.extend({
   store: service(),
-  wizard: service(),
   createModel(transition) {
     const { backend } = this.paramsFor('vault.cluster.secrets.backend');
@@ -44,9 +43,6 @@ export default EditBase.extend({
       modelType = 'database/role';
     }
     if (modelType !== 'secret' && modelType !== 'secret-v2') {
-      if (this.wizard.featureState === 'details' && this.wizard.componentState === 'transit') {
-        this.wizard.transitionFeatureMachine('details', 'CONTINUE', 'transit');
-      }
       return this.store.createRecord(modelType);
     }
     // create record in capabilities that checks for access to create metadata
@@ -59,10 +55,6 @@ export default EditBase.extend({
   },
   model(params, transition) {
-    // wizard will pause unless we manually continue it -- verify that keymgmt tutorial is in progress
-    if (params.itemType === 'provider' && this.wizard.nextStep === 'provider') {
-      this.wizard.transitionFeatureMachine(this.wizard.featureState, 'CONTINUE', 'keymgmt');
-    }
     return hash({
       secret: this.createModel(transition),
       capabilities: {},
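Both secret-backend routes above lean on the UI's `capabilities` model, which asks Vault which verbs the current token can use on a path before the UI issues the real request. A rough sketch of the pattern, assuming (as the hunk's `canRead` variable suggests) that the record exposes a boolean flag; the guarded read is illustrative, not a specific Vault endpoint:

```
// Rough shape of the capabilities pattern; `store` is an EmberData store.
async function fetchConfigIfAllowed(store, backend) {
  const capabilities = await store.findRecord('capabilities', `${backend.id}/config`);
  if (capabilities.canRead) {
    // only issue the real read once we know it will not 403
    return store.queryRecord('secret-engine', { path: `${backend.id}/config` });
  }
  return null;
}
```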
diff --git a/ui/app/routes/vault/cluster/tools/tool.js b/ui/app/routes/vault/cluster/tools/tool.js
index c5030f822e9a..0c5ecfde7af6 100644
--- a/ui/app/routes/vault/cluster/tools/tool.js
+++ b/ui/app/routes/vault/cluster/tools/tool.js
@@ -1,10 +1,7 @@
-import { inject as service } from '@ember/service';
 import Route from '@ember/routing/route';
 import { toolsActions } from 'vault/helpers/tools-actions';
 export default Route.extend({
-  wizard: service(),
-
   beforeModel(transition) {
     const supportedActions = toolsActions();
     const { selected_action: selectedAction } = this.paramsFor(this.routeName);
@@ -26,9 +23,6 @@ export default Route.extend({
   actions: {
     didTransition() {
       const params = this.paramsFor(this.routeName);
-      if (this.wizard.currentMachine === 'tools') {
-        this.wizard.transitionFeatureMachine(this.wizard.featureState, params.selected_action.toUpperCase());
-      }
       /* eslint-disable-next-line ember/no-controller-access-in-routes */
       this.controller.setProperties(params);
       return true;
diff --git a/ui/app/routes/vault/cluster/unseal.js b/ui/app/routes/vault/cluster/unseal.js
index 917588e09426..1495fc503fc3 100644
--- a/ui/app/routes/vault/cluster/unseal.js
+++ b/ui/app/routes/vault/cluster/unseal.js
@@ -1,11 +1,3 @@
-import { inject as service } from '@ember/service';
 import ClusterRoute from './cluster-route-base';
-export default ClusterRoute.extend({
-  wizard: service(),
-
-  activate() {
-    this.wizard.set('initEvent', 'UNSEAL');
-    this.wizard.transitionTutorialMachine(this.wizard.currentState, 'TOUNSEAL');
-  },
-});
+export default ClusterRoute.extend({});
diff --git a/ui/app/serializers/kubernetes/config.js b/ui/app/serializers/kubernetes/config.js
index fccf63dce6ff..3de98f8a2f61 100644
--- a/ui/app/serializers/kubernetes/config.js
+++ b/ui/app/serializers/kubernetes/config.js
@@ -7,6 +7,12 @@ export default class KubernetesConfigSerializer extends ApplicationSerializer {
     const json = super.serialize(...arguments);
     // remove backend value from payload
     delete json.backend;
+    // ensure that values from a previous manual configuration are unset
+    if (json.disable_local_ca_jwt === false) {
+      json.kubernetes_ca_cert = null;
+      json.kubernetes_host = null;
+      json.service_account_jwt = null;
+    }
     return json;
   }
 }
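On the serializer hunk above: `disable_local_ca_jwt === false` means the mount should fall back to the local pod's credentials, so, per the hunk's own comment, values saved by an earlier manual configuration must be unset. Sending explicit nulls rather than omitting the keys is what lets the server clear them. Illustrative input and output, values made up:

```
// Illustrative payload for the serializer branch above (values made up).
const json = { disable_local_ca_jwt: false, kubernetes_host: 'https://10.96.0.1:443' };
if (json.disable_local_ca_jwt === false) {
  json.kubernetes_ca_cert = null;
  json.kubernetes_host = null;
  json.service_account_jwt = null;
}
// json => { disable_local_ca_jwt: false, kubernetes_ca_cert: null,
//           kubernetes_host: null, service_account_jwt: null }
```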
diff --git a/ui/app/serializers/namespace.js b/ui/app/serializers/namespace.js
index 9b8172fb4d20..22034e4148ad 100644
--- a/ui/app/serializers/namespace.js
+++ b/ui/app/serializers/namespace.js
@@ -1,6 +1,10 @@
 import ApplicationSerializer from './application';
-export default ApplicationSerializer.extend({
+export default class NamespaceSerializer extends ApplicationSerializer {
+  attrs = {
+    path: { serialize: false },
+  };
+
   normalizeList(payload) {
     const data = payload.data.keys
       ? payload.data.keys.map((key) => ({
@@ -11,7 +15,7 @@
       : payload.data;
     return data;
-  },
+  }
   normalizeResponse(store, primaryModelClass, payload, id, requestType) {
     const nullResponses = ['deleteRecord', 'createRecord'];
@@ -19,6 +23,6 @@
     const normalizedPayload = nullResponses.includes(requestType)
       ? { id: cid, path: cid }
       : this.normalizeList(payload);
-    return this._super(store, primaryModelClass, normalizedPayload, id, requestType);
-  },
-});
+    return super.normalizeResponse(store, primaryModelClass, normalizedPayload, id, requestType);
+  }
+}
diff --git a/ui/app/templates/components/auth-form.hbs b/ui/app/templates/components/auth-form.hbs
index b17c19b010c6..2753b1d57ad2 100644
--- a/ui/app/templates/components/auth-form.hbs
+++ b/ui/app/templates/components/auth-form.hbs
@@ -19,7 +19,7 @@
             "is-active"
             ""
           }}
-          data-test-auth-method
+          data-test-auth-method={{method.id}}
         >
[a later auth-form.hbs hunk lost its @@ header and most of its markup in extraction; the recoverable lines follow]
       {{/let}}
     {{/each}}
-    {{#if this.hasMethodsWithPath}}
-      [list-item and link markup lost in extraction; link text: Other]
-    {{/if}}
+      [list-item and link markup lost in extraction; link text: Other]
   {{/if}}
diff --git a/ui/app/templates/components/auth-info.hbs b/ui/app/templates/components/auth-info.hbs
index bed119be924a..29cca1d20d33 100644
--- a/ui/app/templates/components/auth-info.hbs
+++ b/ui/app/templates/components/auth-info.hbs
@@ -23,11 +23,6 @@
   {{/if}}
[hunk body lost in extraction: several lines of menu-item markup are removed; the surviving tokens are a "Delete" entry and an {{else}} branch]
@@ -58,13 +58,13 @@
[hunk body lost in extraction; the excerpt ends mid-hunk]