diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 1ceac6e24..f0af86779 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -22,7 +22,7 @@ pipeline { quietPeriod(10) } triggers { - issueCommentTrigger('(?i)(.*(?:jenkins\\W+)?run\\W+(?:the\\W+)?tests(?:\\W+please)?.*|^\\/test$)') + issueCommentTrigger("${obltGitHubComments()}") } stages { /** @@ -38,8 +38,8 @@ pipeline { stage('Check') { steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ sh(label: 'check',script: 'make check') } } @@ -48,20 +48,39 @@ pipeline { stage('Local') { steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ sh(label: 'local',script: 'make local') } } } } - stage('Test') { + stage('Unit Test') { + options { skipDefaultCheckout() } steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ + sh(label: 'test', script: 'make test-unit') + sh(label: 'test', script: 'make junit-report') + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/*.xml") + } + } + } + stage('Integration Test') { + options { skipDefaultCheckout() } + steps { + cleanup() + dir("${BASE_DIR}"){ + withGoEnv(){ retryWithSleep(retries: 2, seconds: 5, backoff: true){ sh(label: "Install Docker", script: '.ci/scripts/install-docker-compose.sh') } - sh(label: 'test', script: 'make test') + sh(label: 'test', script: 'make test-int') + sh(label: 'test', script: 'make junit-report') } } } @@ -84,4 +103,4 @@ def cleanup(){ deleteDir() } unstash 'source' -} \ No newline at end of file +} diff --git a/.ci/bump-go-release-version.sh b/.ci/bump-go-release-version.sh new file mode 100755 index 000000000..f97e8b764 --- /dev/null +++ b/.ci/bump-go-release-version.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# Given the Golang release version this script will bump the version. 
+# +# This script is executed by the automation we are putting in place +# and it requires the git add/commit commands. +# +# Parameters: +# $1 -> the Golang release version to be bumped. Mandatory. +# +set -euo pipefail +MSG="parameter missing." +GO_RELEASE_VERSION=${1:?$MSG} + +echo "Update go version ${GO_RELEASE_VERSION}" +echo "${GO_RELEASE_VERSION}" > .go-version + +git add .go-version +git diff --staged --quiet || git commit -m "[Automation] Update go release version to ${GO_RELEASE_VERSION}" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/.ci/bump-stack-version.sh b/.ci/bump-stack-version.sh new file mode 100755 index 000000000..7caaefdf6 --- /dev/null +++ b/.ci/bump-stack-version.sh @@ -0,0 +1,41 @@ +#!/usr/bin/env bash +# +# Given the stack version this script will bump the version. +# +# This script is executed by the automation we are putting in place +# and it requires the git add/commit commands. +# +# Parameters: +# $1 -> the version to be bumped. Mandatory. +# $2 -> whether to create a branch where to commit the changes to. +# this is required when reusing an existing Pull Request. +# Optional. Default true. +# +set -euo pipefail +MSG="parameter missing." +VERSION=${1:?$MSG} +CREATE_BRANCH=${2:-true} + +OS=$(uname -s| tr '[:upper:]' '[:lower:]') + +if [ "${OS}" == "darwin" ] ; then + SED="sed -i .bck" +else + SED="sed -i" +fi + +echo "Update stack with version ${VERSION}" +${SED} -E -e "s#(ELASTICSEARCH_VERSION)=[0-9]+\.[0-9]+\.[0-9]+(-[a-f0-9]{8})?#\1=${VERSION}#g" dev-tools/integration/.env + +echo "Commit changes" +if [ "$CREATE_BRANCH" = "true" ]; then + base=$(git rev-parse --abbrev-ref HEAD | sed 's#/#-#g') + git checkout -b "update-stack-version-$(date "+%Y%m%d%H%M%S")-${base}" +else + echo "Branch creation disabled." 
+fi +git add dev-tools/integration/.env +git diff --staged --quiet || git commit -m "[Automation] Update elastic stack version to ${VERSION} for testing" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/.ci/jobs/fleet-server.yml b/.ci/jobs/fleet-server.yml index 1379e8670..4ae16dd18 100644 --- a/.ci/jobs/fleet-server.yml +++ b/.ci/jobs/fleet-server.yml @@ -13,6 +13,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true + head-filter-regex: '^(?!update-.*-version).*$' notification-context: 'fleet-server' repo: fleet-server repo-owner: elastic diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ef4ecda88..934167b94 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,45 +8,33 @@ Please label this PR with one of the following labels, depending on the scope of - Docs --> -## What does this PR do? +## What is the problem this PR solves? - +// Please do not just reference an issue. Explain WHAT the problem this PR solves here. -## Why is it important? +## How does this PR solve the problem? - ## Checklist -- [ ] My code follows the style guidelines of this project - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] I have made corresponding change to the default configuration files - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] I have added an entry in `CHANGELOG.next.asciidoc` or `CHANGELOG-developer.next.asciidoc`. -## Author's Checklist - - -- [ ] - -## How to test this PR locally - - ## Related issues @@ -57,25 +45,4 @@ Link related issues below. 
Insert the issue link or reference after the word "Cl - Relates #123 - Requires #123 - Superseds #123 ---> -- - -## Use cases - - - -## Screenshots - - - -## Logs - - +--> \ No newline at end of file diff --git a/.gitignore b/.gitignore index 5efb2dd97..c159582f8 100644 --- a/.gitignore +++ b/.gitignore @@ -4,9 +4,12 @@ .vscode/ bin/ -*.rpm -build/ +/build/ fleet-server fleet_server fleet-server.dev.yml +*.log +*.log.* + +dev-tools/integration/.env.bck \ No newline at end of file diff --git a/.go-version b/.go-version index 4ed70fac1..06fb41b63 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.14.12 +1.17.2 diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 000000000..7df61f975 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,85 @@ +pull_request_rules: + - name: ask to resolve conflict + conditions: + - conflict + actions: + comment: + message: | + This pull request is now in conflicts. Could you fix it @{{author}}? 🙏 + To fixup this pull request, you can check out it locally. See documentation: https://help.github.com/articles/checking-out-pull-requests-locally/ + ``` + git fetch upstream + git checkout -b {{head}} upstream/{{head}} + git merge upstream/{{base}} + git push upstream {{head}} + ``` + - name: backport patches to 7.x branch + conditions: + - merged + - base=master + - label=v7.14.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.x" + labels: + - "backport" + - name: backport patches to 7.13 branch + conditions: + - merged + - base=master + - label=v7.13.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.13" + labels: + - "backport" + - name: backport patches to 7.12 branch + conditions: + - merged + - base=master + - label=v7.12.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.12" + labels: + - "backport" + - name: automatic merge for 7\. branches when CI passes + conditions: + - check-success=fleet-server/pr-merge + - check-success=CLA + - base~=^7\. 
+ - label=backport + - author=mergify[bot] + actions: + merge: + method: squash + strict: smart+fasttrack + - name: automatic merge when CI passes and the file dev-tools/integration/.env is modified. + conditions: + - check-success=fleet-server/pr-merge + - label=automation + - files~=^dev-tools/integration/.env$ + actions: + merge: + method: squash + strict: smart+fasttrack + - name: delete upstream branch with changes on dev-tools/integration/.env or .go-version after merging/closing it + conditions: + - or: + - merged + - closed + - and: + - label=automation + - head~=^update-.*-version + - files~=^(dev-tools/integration/.env|.go-version)$ + actions: + delete_head_branch: diff --git a/Makefile b/Makefile index 525c51788..d5b45db87 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,13 @@ DEFAULT_VERSION=$(shell awk '/const defaultVersion/{print $$NF}' main.go | tr -d TARGET_ARCH_386=x86 TARGET_ARCH_amd64=x86_64 TARGET_ARCH_arm64=arm64 -PLATFORMS ?= darwin/amd64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +PLATFORMS ?= darwin/amd64 darwin/arm64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +BUILDMODE_linux_amd64=-buildmode=pie +BUILDMODE_linux_arm64=-buildmode=pie +BUILDMODE_windows_386=-buildmode=pie +BUILDMODE_windows_amd64=-buildmode=pie +BUILDMODE_darwin_amd64=-buildmode=pie +BUILDMODE_darwin_arm64=-buildmode=pie ifeq ($(SNAPSHOT),true) VERSION=${DEFAULT_VERSION}-SNAPSHOT @@ -12,10 +18,15 @@ VERSION=${DEFAULT_VERSION} endif PLATFORM_TARGETS=$(addprefix release-, $(PLATFORMS)) -LDFLAGS=-w -s -X main.Version=${VERSION} +COMMIT=$(shell git rev-parse --short HEAD) +NOW=$(shell date -u '+%Y-%m-%dT%H:%M:%SZ') +LDFLAGS=-w -s -X main.Version=${VERSION} -X main.Commit=${COMMIT} -X main.BuildTime=$(NOW) CMD_COLOR_ON=\033[32m\xE2\x9c\x93 CMD_COLOR_OFF=\033[0m +# Directory to dump build tools into +GOBIN=$(shell go env GOPATH)/bin/ + .PHONY: help help: ## - Show help message @printf "${CMD_COLOR_ON} usage: make [target]\n\n${CMD_COLOR_OFF}" @@ 
-35,13 +46,14 @@ clean: ## - Clean up build artifacts .PHONY: generate generate: ## - Generate schema models @printf "${CMD_COLOR_ON} Installing module for go generate\n${CMD_COLOR_OFF}" - go install github.com/aleksmaus/generate/... + env GOBIN=${GOBIN} go install github.com/aleksmaus/generate/cmd/schema-generate@latest @printf "${CMD_COLOR_ON} Running go generate\n${CMD_COLOR_OFF}" - go generate ./... + env PATH="${GOBIN}:${PATH}" go generate ./... .PHONY: check check: ## - Run all checks @$(MAKE) generate + @$(MAKE) defaults @$(MAKE) check-headers @$(MAKE) check-go @$(MAKE) notice @@ -49,8 +61,8 @@ check: ## - Run all checks .PHONY: check-headers check-headers: ## - Check copyright headers - @go install github.com/elastic/go-licenser - @go-licenser -license Elastic + @env GOBIN=${GOBIN} go install github.com/elastic/go-licenser@latest + @env PATH="${GOBIN}:${PATH}" go-licenser -license Elastic .PHONY: check-go check-go: ## - Run go fmt, go vet, go mod tidy @@ -62,8 +74,9 @@ check-go: ## - Run go fmt, go vet, go mod tidy notice: ## - Generates the NOTICE.txt file. @echo "Generating NOTICE.txt" @go mod tidy - @go mod download - go list -m -json all | go run go.elastic.co/go-licence-detector \ + @go mod download all + @env GOBIN=${GOBIN} go install go.elastic.co/go-licence-detector@latest + go list -m -json all | env PATH="${GOBIN}:${PATH}" go-licence-detector \ -includeIndirect \ -rules dev-tools/notice/rules.json \ -overrides dev-tools/notice/overrides.json \ @@ -71,6 +84,11 @@ notice: ## - Generates the NOTICE.txt file. -noticeOut NOTICE.txt \ -depsOut "" +.PHONY: defaults +defaults: ## -Generate defaults based on limits files. 
+ @echo "Generating env_defaults.go" + @go run dev-tools/buildlimits/buildlimits.go --in "internal/pkg/config/defaults/*.yml" --out internal/pkg/config/env_defaults.go + .PHONY: check-no-changes check-no-changes: @git diff | cat @@ -79,11 +97,11 @@ check-no-changes: .PHONY: test test: prepare-test-context ## - Run all tests - @$(MAKE) test-unit - @$(MAKE) test-int + @./dev-tools/run_with_go_ver $(MAKE) test-unit + @./dev-tools/run_with_go_ver $(MAKE) test-int @$(MAKE) junit-report -.PHONY: test-unit +.PHONY: test-unit test-unit: prepare-test-context ## - Run unit tests only set -o pipefail; go test -v -race ./... | tee build/test-unit.out @@ -108,12 +126,14 @@ $(PLATFORM_TARGETS): release-%: $(eval $@_OS := $(firstword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_GO_ARCH := $(lastword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_ARCH := $(TARGET_ARCH_$($@_GO_ARCH))) - GOOS=$($@_OS) GOARCH=$($@_GO_ARCH) go build -ldflags="${LDFLAGS}" -o build/binaries/fleet-server-$(VERSION)-$($@_OS)-$($@_ARCH)/fleet-server . + $(eval $@_BUILDMODE:= $(BUILDMODE_$($@_OS)_$($@_GO_ARCH))) + GOOS=$($@_OS) GOARCH=$($@_GO_ARCH) go build -ldflags="${LDFLAGS}" $($@_BUILDMODE) -o build/binaries/fleet-server-$(VERSION)-$($@_OS)-$($@_ARCH)/fleet-server . 
@$(MAKE) OS=$($@_OS) ARCH=$($@_ARCH) package-target .PHONY: package-target package-target: build/distributions ifeq ($(OS),windows) + @mv build/binaries/fleet-server-$(VERSION)-$(OS)-$(ARCH)/fleet-server build/binaries/fleet-server-$(VERSION)-$(OS)-$(ARCH)/fleet-server.exe @cd build/binaries && zip -q -r ../distributions/fleet-server-$(VERSION)-$(OS)-$(ARCH).zip fleet-server-$(VERSION)-$(OS)-$(ARCH) @cd build/distributions && shasum -a 512 fleet-server-$(VERSION)-$(OS)-$(ARCH).zip > fleet-server-$(VERSION)-$(OS)-$(ARCH).zip.sha512 else @@ -165,7 +185,7 @@ int-docker-stop: ## - Stop docker environment for integration tests .PHONY: test-int test-int: prepare-test-context ## - Run integration tests with full setup (slow!) @$(MAKE) int-docker-start - @set -o pipefail; $(MAKE) test-int-set | tee build/test-init.out + @set -o pipefail; $(MAKE) test-int-set | tee build/test-int.out @$(MAKE) int-docker-stop # Run integration tests without starting/stopping docker @@ -176,5 +196,4 @@ test-int: prepare-test-context ## - Run integration tests with full setup (slow .PHONY: test-int-set test-int-set: ## - Run integration tests without setup # Initialize indices one before running all the tests - ELASTICSEARCH_HOSTS=${TEST_ELASTICSEARCH_HOSTS} go run ./dev-tools/integration/main.go ELASTICSEARCH_HOSTS=${TEST_ELASTICSEARCH_HOSTS} go test -v -tags=integration -count=1 -race ./... 
diff --git a/NOTICE.txt b/NOTICE.txt index 334b5810d..a5ecbf2aa 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -10,43 +10,42 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- -Dependency : github.com/aleksmaus/generate -Version: v0.0.0-20201213151810-c5bc68a6a42f +Dependency : github.com/Pallinder/go-randomdata +Version: v1.2.0 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aleksmaus/generate@v0.0.0-20201213151810-c5bc68a6a42f/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/!pallinder/go-randomdata@v1.2.0/LICENSE: -MIT License +The MIT License (MIT) -Copyright (c) 2017 Adrian Hesketh +Copyright (c) 2013 David Pallinder -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/dgraph-io/ristretto -Version: v0.0.3 +Version: v0.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.0.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.1.0/LICENSE: Apache License Version 2.0, January 2004 @@ -228,11 +227,11 @@ Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/beats/v7 -Version: v7.10.0 +Version: v7.11.1 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/beats/v7@v7.10.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/beats/v7@v7.11.1/LICENSE.txt: Source code in this repository is variously licensed under the Apache License Version 2.0, an Apache compatible license, or the Elastic License. Outside of @@ -250,12 +249,245 @@ License Version 2.0. -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.0.0-20200728144331-527225d8e836 +Dependency : github.com/elastic/elastic-agent-client/v7 +Version: v7.0.0-20210727140539-f0905d9377f6 +Licence type (autodetected): Elastic +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20210727140539-f0905d9377f6/LICENSE.txt: + +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. 
IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. 
You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. 
The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. 
Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. 
TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. 
+ This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. 
+ + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. + + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. 
+ + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/go-elasticsearch/v7 +Version: v7.5.1-0.20210823155509-845c8efe54a7 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.0.0-20200728144331-527225d8e836/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v7@v7.5.1-0.20210823155509-845c8efe54a7/LICENSE: Apache License Version 2.0, January 2004 @@ -1111,6 +1343,370 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/go-version +Version: v1.3.0 +Licence type (autodetected): MPL-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-version@v1.3.0/LICENSE: + +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. “Contributor” + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. 
“Contributor Version” + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor’s Contribution. + +1.3. “Contribution” + + means Covered Software of a particular Contributor. + +1.4. “Covered Software” + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. “Incompatible With Secondary Licenses” + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. + +1.6. “Executable Form” + + means any form of the work other than Source Code Form. + +1.7. “Larger Work” + + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. + +1.8. “License” + + means this document. + +1.9. “Licensable” + + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. + +1.10. “Modifications” + + means any of the following: + + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. “Patent Claims” of a Contributor + + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. + +1.12. 
“Secondary License” + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. “Source Code Form” + + means the form of the work preferred for making modifications. + +1.14. “You” (or “Your”) + + means an individual or a legal entity exercising rights under this + License. For legal entities, “You” includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, “control” means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. 
Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. for infringements caused by: (i) Your and any other third party’s + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. + + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. + +3.2. 
Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. 
However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. 
+ Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - “Incompatible With Secondary Licenses” Notice + + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by + the Mozilla Public License, v. 2.0. 
+ + + -------------------------------------------------------------------------------- Dependency : github.com/hashicorp/golang-lru Version: v0.5.2-0.20190520140433-59383c442f7d @@ -1523,16 +2119,33 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/mitchellh/mapstructure -Version: v1.3.3 +Dependency : github.com/mailru/easyjson +Version: v0.7.7 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.7/LICENSE: + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/miolini/datacounter +Version: v1.0.2 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.3.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/miolini/datacounter@v1.0.2/LICENSE: The MIT License (MIT) -Copyright (c) 2013 Mitchell Hashimoto +Copyright (c) 2015 Artem Andreenko Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -1541,16 +2154,89 @@ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ + + +-------------------------------------------------------------------------------- +Dependency : github.com/pbnjay/memory +Version: v0.0.0-20210728143218-7b4eea64cf58 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pbnjay/memory@v0.0.0-20210728143218-7b4eea64cf58/LICENSE: + +BSD 3-Clause License + +Copyright (c) 2017, Jeremy Jay +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/pkg/errors +Version: v0.9.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- @@ -1829,40 +2515,32 @@ SOFTWARE. 
-------------------------------------------------------------------------------- -Dependency : golang.org/x/crypto -Version: v0.0.0-20200622213623-75b288015ac9 -Licence type (autodetected): BSD-3-Clause +Dependency : go.uber.org/zap +Version: v1.14.0 +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20200622213623-75b288015ac9/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: -Copyright (c) 2009 The Go Authors. All rights reserved. +Copyright (c) 2016-2017 Uber Technologies, Inc. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. -------------------------------------------------------------------------------- @@ -4174,11 +4852,11 @@ THE SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/Azure/go-autorest/autorest -Version: v0.9.4 +Version: v0.9.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest@v0.9.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest@v0.9.6/LICENSE: Apache License @@ -4375,11 +5053,11 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- Dependency : github.com/Azure/go-autorest/autorest/adal -Version: v0.8.1 +Version: v0.8.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest/adal@v0.8.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest/adal@v0.8.2/LICENSE: Apache License @@ -13454,6 +14132,37 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/spdystream@v0.0. limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/docopt/docopt-go +Version: v0.0.0-20180111231733-ee0de3bc6815 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/docopt/docopt-go@v0.0.0-20180111231733-ee0de3bc6815/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Keith Batten +Copyright (c) 2016 David Irvine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/andrewkroh/goja Version: v0.0.0-20190128172624-dd2ac4456e20 @@ -13938,239 +14647,6 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/ecs@v1.6.0/LICE limitations under the License. 
--------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20200709172729-d43b7ad5833a -Licence type (autodetected): Elastic --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20200709172729-d43b7ad5833a/LICENSE.txt: - -ELASTIC LICENSE AGREEMENT - -PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH -CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF -THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") -THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, -CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY -INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU -ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE -WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE -GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON -BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL -AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF -SUCH ENTITY. - -Posted Date: April 20, 2018 - -This Agreement is entered into by and between Elasticsearch BV ("Elastic") and -You, or the legal entity on behalf of whom You are acting (as applicable, -"You"). - -1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE -SOFTWARE - - 1.1 Object Code End User License. Subject to the terms and conditions of - Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and - for so long as you are not in breach of any provision of this Agreement, a - License to the Basic Features and Functions of the Elastic Software. 
- - 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic - and its licensors own all right, title and interest in and to the Elastic - Software, and except as expressly set forth in Sections 1.1, and 2.1 of this - Agreement, no other license to the Elastic Software is granted to You under - this Agreement, by implication, estoppel or otherwise. You agree not to: (i) - reverse engineer or decompile, decrypt, disassemble or otherwise reduce any - Elastic Software provided to You in Object Code, or any portion thereof, to - Source Code, except and only to the extent any such restriction is prohibited - by applicable law, (ii) except as expressly permitted in this Agreement, - prepare derivative works from, modify, copy or use the Elastic Software Object - Code or the Commercial Software Source Code in any manner; (iii) except as - expressly permitted in Section 1.1 above, transfer, sell, rent, lease, - distribute, sublicense, loan or otherwise transfer, Elastic Software Object - Code, in whole or in part, to any third party; (iv) use Elastic Software - Object Code for providing time-sharing services, any software-as-a-service, - service bureau services or as part of an application services provider or - other service offering (collectively, "SaaS Offering") where obtaining access - to the Elastic Software or the features and functions of the Elastic Software - is a primary reason or substantial motivation for users of the SaaS Offering - to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) - circumvent the limitations on use of Elastic Software provided to You in - Object Code format that are imposed or preserved by any License Key, or (vi) - alter or remove any Marks and Notices in the Elastic Software. 
If You have any - question as to whether a specific SaaS Offering constitutes a Prohibited SaaS - Offering, or are interested in obtaining Elastic's permission to engage in - commercial or non-commercial distribution of the Elastic Software, please - contact elastic_license@elastic.co. - - 1.3 Third Party Open Source Software. The Commercial Software may contain or - be provided with third party open source libraries, components, utilities and - other open source software (collectively, "Open Source Software"), which Open - Source Software may have applicable license terms as identified on a website - designated by Elastic. Notwithstanding anything to the contrary herein, use of - the Open Source Software shall be subject to the license terms and conditions - applicable to such Open Source Software, to the extent required by the - applicable licensor (which terms shall not restrict the license rights granted - to You hereunder, but may contain additional rights). To the extent any - condition of this Agreement conflicts with any license to the Open Source - Software, the Open Source Software license will govern with respect to such - Open Source Software only. Elastic may also separately provide you with - certain open source software that is licensed by Elastic. Your use of such - Elastic open source software will not be governed by this Agreement, but by - the applicable open source license terms. - -2. COMMERCIAL SOFTWARE SOURCE CODE - - 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of - this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as - you are not in breach of any provision of this Agreement, a limited, - non-exclusive, non-transferable, fully paid up royalty free right and license - to the Commercial Software in Source Code format, without the right to grant - or authorize sublicenses, to prepare Derivative Works of the Commercial - Software, provided You (i) do not hack the licensing mechanism, or otherwise - circumvent the intended limitations on the use of Elastic Software to enable - features other than Basic Features and Functions or those features You are - entitled to as part of a Subscription, and (ii) use the resulting object code - only for reasonable testing purposes. - - 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the - Commercial Software Source Code other than in accordance with Section 2.1 - above, (ii) use a Derivative Work of the Commercial Software outside of a - Non-production Environment, in any production capacity, on a temporary or - permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, - loan or otherwise make available the Commercial Software Source Code, in whole - or in part, to any third party. Notwithstanding the foregoing, You may - maintain a copy of the repository in which the Source Code of the Commercial - Software resides and that copy may be publicly accessible, provided that you - include this Agreement with Your copy of the repository. - -3. TERMINATION - - 3.1 Termination. This Agreement will automatically terminate, whether or not - You receive notice of such Termination from Elastic, if You breach any of its - provisions. - - 3.2 Post Termination. Upon any termination of this Agreement, for any reason, - You shall promptly cease the use of the Elastic Software in Object Code format - and cease use of the Commercial Software in Source Code format. 
For the - avoidance of doubt, termination of this Agreement will not affect Your right - to use Elastic Software, in either Object Code or Source Code formats, made - available under the Apache License Version 2.0. - - 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or - expiration of this Agreement. - -4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY - - 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE - LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, - AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR - STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT - PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY - DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH - RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS - OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE - ELASTIC SOFTWARE WILL BE UNINTERRUPTED. - - 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE - LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, - INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS - INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY - SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH - OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE - PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A - BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC - HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -5. 
MISCELLANEOUS - - This Agreement completely and exclusively states the entire agreement of the - parties regarding the subject matter herein, and it supersedes, and its terms - govern, all prior proposals, agreements, or other communications between the - parties, oral or written, regarding such subject matter. This Agreement may be - modified by Elastic from time to time, and any such modifications will be - effective upon the "Posted Date" set forth at the top of the modified - Agreement. If any provision hereof is held unenforceable, this Agreement will - continue without said provision and be interpreted to reflect the original - intent of the parties. This Agreement and any non-contractual obligation - arising out of or in connection with it, is governed exclusively by Dutch law. - This Agreement shall not be governed by the 1980 UN Convention on Contracts - for the International Sale of Goods. All disputes arising out of or in - connection with this Agreement, including its existence and validity, shall be - resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except - where mandatory law provides for the courts at another location in The - Netherlands to have jurisdiction. The parties hereby irrevocably waive any and - all claims and defenses either might otherwise have in any such action or - proceeding in any of such courts based upon any alleged lack of personal - jurisdiction, improper venue, forum non conveniens or any similar claim or - defense. A breach or threatened breach, by You of Section 2 may cause - irreparable harm for which damages at law may not provide adequate relief, and - therefore Elastic shall be entitled to seek injunctive relief without being - required to post a bond. 
You may not assign this Agreement (including by - operation of law in connection with a merger or acquisition), in whole or in - part to any third party without the prior written consent of Elastic, which - may be withheld or granted by Elastic in its sole and absolute discretion. - Any assignment in violation of the preceding sentence is void. Notices to - Elastic may also be sent to legal@elastic.co. - -6. DEFINITIONS - - The following terms have the meanings ascribed: - - 6.1 "Affiliate" means, with respect to a party, any entity that controls, is - controlled by, or which is under common control with, such party, where - "control" means ownership of at least fifty percent (50%) of the outstanding - voting shares of the entity, or the contractual right to establish policy for, - and manage the operations of, the entity. - - 6.2 "Basic Features and Functions" means those features and functions of the - Elastic Software that are eligible for use under a Basic license, as set forth - at https://www.elastic.co/subscriptions, as may be modified by Elastic from - time to time. - - 6.3 "Commercial Software" means the Elastic Software Source Code in any file - containing a header stating the contents are subject to the Elastic License or - which is contained in the repository folder labeled "x-pack", unless a LICENSE - file present in the directory subtree declares a different license. - - 6.4 "Derivative Work of the Commercial Software" means, for purposes of this - Agreement, any modification(s) or enhancement(s) to the Commercial Software, - which represent, as a whole, an original work of authorship. 
- - 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, - royalty free, right and license, without the right to grant or authorize - sublicenses, solely for Your internal business operations to (i) install and - use the applicable Features and Functions of the Elastic Software in Object - Code, and (ii) permit Contractors and Your Affiliates to use the Elastic - software as set forth in (i) above, provided that such use by Contractors must - be solely for Your benefit and/or the benefit of Your Affiliates, and You - shall be responsible for all acts and omissions of such Contractors and - Affiliates in connection with their use of the Elastic software that are - contrary to the terms and conditions of this Agreement. - - 6.6 "License Key" means a sequence of bytes, including but not limited to a - JSON blob, that is used to enable certain features and functions of the - Elastic Software. - - 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and - notices present on the Documentation as originally provided by Elastic. - - 6.8 "Non-production Environment" means an environment for development, testing - or quality assurance, where software is not used for production purposes. - - 6.9 "Object Code" means any form resulting from mechanical transformation or - translation of Source Code form, including but not limited to compiled object - code, generated documentation, and conversions to other media types. - - 6.10 "Source Code" means the preferred form of computer software for making - modifications, including but not limited to software source code, - documentation source, and configuration files. - - 6.11 "Subscription" means the right to receive Support Services and a License - to the Commercial Software. 
- - -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-concert Version: v0.0.4 @@ -14384,11 +14860,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.0 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-libaudit/v2 -Version: v2.0.2 +Version: v2.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.0.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.1.0/LICENSE.txt: Apache License @@ -16137,11 +16613,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 -------------------------------------------------------------------------------- Dependency : github.com/elastic/gosigar -Version: v0.10.6-0.20200715000138-f115143bb233 +Version: v0.13.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.10.6-0.20200715000138-f115143bb233/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.13.0/LICENSE: Apache License Version 2.0, January 2004 @@ -16839,11 +17315,11 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/protoc-gen-v -------------------------------------------------------------------------------- Dependency : github.com/evanphx/json-patch -Version: v4.2.0+incompatible +Version: v4.9.0+incompatible Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.2.0+incompatible/LICENSE: +Contents of probable 
licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.9.0+incompatible/LICENSE: Copyright (c) 2014, Evan Phoenix All rights reserved. @@ -16853,7 +17329,7 @@ modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Evan Phoenix nor the names of its contributors @@ -17009,14 +17485,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/fsnotify/fsnotify -Version: v1.4.7 +Version: v1.4.9 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/fsnotify/fsnotify@v1.4.7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/fsnotify/fsnotify@v1.4.9/LICENSE: Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -17393,11 +17869,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/go-logr/logr -Version: v0.1.0 +Version: v0.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.2.0/LICENSE: Apache License Version 2.0, January 2004 @@ -22234,11 +22710,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/googleapis/gnostic -Version: v0.3.1-0.20190624222214-25d8b0b66985 +Version: v0.4.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.3.1-0.20190624222214-25d8b0b66985/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: Apache License @@ -22445,207 +22921,6 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.3 --------------------------------------------------------------------------------- -Dependency : github.com/gophercloud/gophercloud -Version: v0.1.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gophercloud/gophercloud@v0.1.0/LICENSE: - -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. 
You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. 
- - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - -------------------------------------------------------------------------------- Dependency : github.com/gopherjs/gopherjs Version: v0.0.0-20181017120253-0766667cb4d1 @@ -22812,11 +23087,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/h2non/filetype -Version: v1.0.12 +Version: v1.1.1-0.20201130172452-f60988ab73d5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.0.12/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: The MIT License @@ -23207,496 +23482,133 @@ Exhibit B - “Incompatible With Secondary Licenses” Notice the Mozilla Public License, v. 2.0. - --------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-hclog -Version: v0.9.2 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-hclog@v0.9.2/LICENSE: - -MIT License - -Copyright (c) 2017 HashiCorp - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-multierror -Version: v1.1.0 -Licence type (autodetected): MPL-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-multierror@v1.1.0/LICENSE: - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” - - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. - -1.8. “License” - - means this document. - -1.9. 
“Licensable” - - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. - -1.10. “Modifications” - - means any of the following: - - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor - - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. - -1.12. “Secondary License” - - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. - -1.13. “Source Code Form” - - means the form of the work preferred for making modifications. - -1.14. “You” (or “Your”) - - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. - - -2. License Grants and Conditions - -2.1. Grants - - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: - - a. 
under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and - - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. - -2.2. Effective Date - - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. - -2.3. Limitations on Grant Scope - - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: - - a. for any code that a Contributor has removed from Covered Software; or - - b. for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or - - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. - - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). - -2.4. Subsequent Licenses - - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). - -2.5. 
Representation - - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. - -2.6. Fair Use - - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. - -2.7. Conditions - - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. - - -3. Responsibilities - -3.1. Distribution of Source Form - - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. - -3.2. Distribution of Executable Form - - If You distribute Covered Software in Executable Form then: - - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and - - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. - -3.3. Distribution of a Larger Work - - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. 
If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). - -3.4. Notices - - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. - -3.5. Application of Additional Terms - - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. - -4. Inability to Comply Due to Statute or Regulation - - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. 
Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. - -5. Termination - -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. - -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. - -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. - -6. 
Disclaimer of Warranty - - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. - -7. Limitation of Liability - - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. - -8. Litigation - - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. 
Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. - - -10. Versions of the License - -10.1. New Versions - - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. - -10.2. Effect of New Versions - - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. - -10.3. Modified Versions - - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). - -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. - -Exhibit A - Source Code Form License Notice - - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. 
If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. - -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. - -You may add additional accurate notices of copyright ownership. - -Exhibit B - “Incompatible With Secondary Licenses” Notice - - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. - - + -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-retryablehttp -Version: v0.6.6 +Dependency : github.com/hashicorp/go-hclog +Version: v0.9.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-hclog@v0.9.2/LICENSE: + +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/go-multierror +Version: v1.1.0 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-retryablehttp@v0.6.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-multierror@v1.1.0/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. "Contributor" +1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. "Contributor Version" +1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + Contributor and that particular Contributor’s Contribution. -1.3. "Contribution" +1.3. “Contribution” means Covered Software of a particular Contributor. -1.4. "Covered Software" +1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. "Incompatible With Secondary Licenses" +1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + b. 
that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -1.6. "Executable Form" +1.6. “Executable Form” means any form of the work other than Source Code Form. -1.7. "Larger Work" +1.7. “Larger Work” - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -1.8. "License" +1.8. “License” means this document. -1.9. "Licensable" +1.9. “Licensable” - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -1.10. "Modifications" +1.10. “Modifications” means any of the following: - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. "Patent Claims" of a Contributor +1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. 
+ means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -1.12. "Secondary License" +1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. "Source Code Form" +1.13. “Source Code Form” means the form of the work preferred for making modifications. -1.14. "You" (or "Your") +1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is + License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause + definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -23712,59 +23624,57 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's + b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. 
- This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions @@ -23777,12 +23687,11 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. 
You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form @@ -23794,40 +23703,39 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. 
If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). 3.4. Notices - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. 
However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -23836,14 +23744,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. 
Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. 5. Termination @@ -23851,22 +23759,21 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. 5.2. 
If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -23875,16 +23782,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. 
The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. 7. Limitation of Liability @@ -23896,29 +23803,27 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. 
+ Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. 10. Versions of the License @@ -23932,24 +23837,23 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license steward. 10.3. 
Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice @@ -23960,28 +23864,26 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. 
-Exhibit B - "Incompatible With Secondary Licenses" Notice +Exhibit B - “Incompatible With Secondary Licenses” Notice - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. - -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-uuid -Version: v1.0.2 +Dependency : github.com/hashicorp/go-retryablehttp +Version: v0.6.6 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-retryablehttp@v0.6.6/LICENSE: Mozilla Public License, version 2.0 @@ -24349,100 +24251,100 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-version -Version: v1.0.0 +Dependency : github.com/hashicorp/go-uuid +Version: v1.0.2 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-version@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. 
"Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. "Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or + a. 
any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. “Patent Claims” of a Contributor +1.11. "Patent Claims" of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -1.12. “Secondary License” +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. 
For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -24458,57 +24360,59 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. 
No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. 
+ Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -24521,11 +24425,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -24537,39 +24442,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. 
+ You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -24578,14 +24484,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. 
Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. Termination @@ -24593,21 +24499,22 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -24616,16 +24523,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. 
No use of any Covered Software is authorized under this License - except under this disclaimer. + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. Limitation of Liability @@ -24637,27 +24544,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. 
Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -24671,23 +24580,24 @@ Mozilla Public License, version 2.0 10.2. 
Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. Exhibit A - Source Code Form License Notice @@ -24698,16 +24608,17 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. 
-If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. @@ -25561,13 +25472,224 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/jonboulle/clockwork +Version: v0.2.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/jonboulle/clockwork@v0.2.2/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/josephspurrier/goversioninfo -Version: v0.0.0-20200309025242-14b0ab84c6ca +Version: v0.0.0-20190209210621-63e6d1acd3dd Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/josephspurrier/goversioninfo@v0.0.0-20200309025242-14b0ab84c6ca/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/josephspurrier/goversioninfo@v0.0.0-20190209210621-63e6d1acd3dd/LICENSE: The MIT License (MIT) @@ -25592,6 +25714,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/josharian/intern +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/josharian/intern@v1.0.0/license.md: + +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/jpillora/backoff Version: v1.0.0 @@ -25625,11 +25778,11 @@ SOFTWARE. 
-------------------------------------------------------------------------------- Dependency : github.com/json-iterator/go -Version: v1.1.8 +Version: v1.1.10 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.8/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.10/LICENSE: MIT License @@ -26513,23 +26666,6 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/mailru/easyjson -Version: v0.7.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.1/LICENSE: - -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/markbates/pkger Version: v0.17.0 @@ -27443,6 +27579,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/mapstructure +Version: v1.1.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.1.2/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. 
+ + -------------------------------------------------------------------------------- Dependency : github.com/modern-go/concurrent Version: v0.0.0-20180306012644-bacd9c7ef1dd @@ -29232,39 +29399,6 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/pkg/errors -Version: v0.9.1 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: - -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/pmezard/go-difflib Version: v1.0.0 @@ -35162,32 +35296,40 @@ THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : go.uber.org/zap -Version: v1.14.0 -Licence type (autodetected): MIT +Dependency : golang.org/x/crypto +Version: v0.0.0-20200622213623-75b288015ac9 +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20200622213623-75b288015ac9/LICENSE: -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2009 The Go Authors. All rights reserved. 
-Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- @@ -35451,11 +35593,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.0.0-20200625212154-ddb9806d33ae +Version: v0.0.0-20200930185726-fdedc70b468f Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20200625212154-ddb9806d33ae/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20200930185726-fdedc70b468f/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -35488,11 +35630,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/text -Version: v0.3.2 +Version: v0.3.3 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.2/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.3/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. 
@@ -35848,11 +35990,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/appengine@v1.6.5 -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20191230161307-f3c370f40bfb +Version: v0.0.0-20200526211855-cb27e3aa2013 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20191230161307-f3c370f40bfb/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20200526211855-cb27e3aa2013/LICENSE: Apache License @@ -36272,11 +36414,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.29.1/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.23.0 +Version: v1.24.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.23.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.24.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -38006,11 +38148,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : k8s.io/api -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.19.4/LICENSE: Apache License @@ -38218,11 +38360,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.18.3/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/apimachinery -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.19.4/LICENSE: Apache License @@ -38430,11 +38572,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.18.3/LICENS -------------------------------------------------------------------------------- Dependency : k8s.io/client-go -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.19.4/LICENSE: Apache License @@ -38642,11 +38784,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.18.3/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/gengo -Version: v0.0.0-20190128074634-0689ccc1d7d6 +Version: v0.0.0-20200413195148-3a45101e95ac Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file 
$GOMODCACHE/k8s.io/gengo@v0.0.0-20190128074634-0689ccc1d7d6/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/gengo@v0.0.0-20200413195148-3a45101e95ac/LICENSE: Apache License @@ -38853,12 +38995,12 @@ Contents of probable licence file $GOMODCACHE/k8s.io/gengo@v0.0.0-20190128074634 -------------------------------------------------------------------------------- -Dependency : k8s.io/klog -Version: v1.0.0 +Dependency : k8s.io/klog/v2 +Version: v2.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/klog@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.2.0/LICENSE: Apache License Version 2.0, January 2004 @@ -39055,11 +39197,11 @@ third-party archives. -------------------------------------------------------------------------------- Dependency : k8s.io/kube-openapi -Version: v0.0.0-20200410145947-61e04a5be9a6 +Version: v0.0.0-20200805222855-6aeccd4b50c6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20200410145947-61e04a5be9a6/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20200805222855-6aeccd4b50c6/LICENSE: Apache License @@ -39267,11 +39409,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-2020041 -------------------------------------------------------------------------------- Dependency : k8s.io/utils -Version: v0.0.0-20200324210504-a9aa75ae1b89 +Version: v0.0.0-20200729134348-d5654de09c73 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20200324210504-a9aa75ae1b89/LICENSE: +Contents of probable licence file 
$GOMODCACHE/k8s.io/utils@v0.0.0-20200729134348-d5654de09c73/LICENSE: Apache License @@ -39515,12 +39657,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : sigs.k8s.io/structured-merge-diff/v3 -Version: v3.0.0 +Dependency : sigs.k8s.io/structured-merge-diff/v4 +Version: v4.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v3@v3.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.0.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/README.md b/README.md index eaf469b89..9d7b57dc5 100644 --- a/README.md +++ b/README.md @@ -6,57 +6,95 @@ fleet-server is under development. The following are notes to help developers onboarding to the project to quickly get running. These notes might change at any time. -### Startup fleet-server +## Setup -Currently to startup fleet-server, the Kibana encryption key is needed. There are two options for this. +To run and test fleet-server, a recent version of Elastic Agent and Kibana are needed. In the following Elastic Agent and Kibana are built from source. The fleet-server itself is not built from source but pulled from the latest snapshot build. It would be possible to also pull Elastic Agent or Kibana from the latest snapshot but the assumption that is made here that whoever is testing this, is likely developing either Elastic Agent or on the Kibana side. -Either the key `a...` is used in the kibana config as this is the default: + +### Kibana setup + +The source code of Kibana must be checked out. 
After checkout, the following command must be run: ``` -xpack.encryptedSavedObjects.encryptionKey: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" +yarn kbn bootstrap ``` -The alternative is to use `ES_SAVED_KEY` and pass it to fleet-server during setup with the value of the encryption key used in Kibana. +This will take a while the first time it is run. An error might be return in case not a valid node version is installed. Use nvm to install the correct version. + +Now the following two commands must be run in parallel: + +``` +# Start ES +yarn es snapshot -E xpack.security.authc.api_key.enabled=true + +# Start KB +yarn start --no-base-path +``` + +As soon as all is running, go to `http://localhost:5601`, enter `elastic/changeme` as credential and navigate to Fleet. Trigger the Fleet setup. As soon as this is completed, copy the `policy id` and `enrollment token` for the fleet-server policy. The policy id can be copied from the URL, the enrollment token can be found in the Enrollment Token list. + +NOTE: This step can be skipped if the full command below for the Elastic Agent is used. +Now Kibana is running and ready. The next step is to setup Elastic Agent. -### Kibana +## Beats repo -Currently there is some work to do to be able to run Kibana with Fleet Server and all the features are not yet supported, in the future, these workarounds will not be needed anymore. +To build the Elastic Agent from source, check out the beats repository. Navigate to `x-pack/elastic-agent` and run the following command: -* Start fleet-server before Kibana, to create the mappings in ES. -* Create and use a custom user as the `kibana_system` user -* Enable Fleet server usage with `xpack.fleet.agents.fleetServerEnabled: true` +``` +SNAPSHOT=true DEV=true PLATFORMS=darwin mage package +``` + +The above assumes you are running on OS X. Put the platform in you are running on. This speeds up packaging as it only builds it for your platform. 
As soon as this is completed (it might take a while for the first time) navigate to `build/distributions` and unpackage the `.tar.gz`. Change working directory to the elastic-agent directory and start the Elastic Agent: ``` -POST /_security/role/kibana_fleet_system -{ - "cluster" : [ - "all" - ], - "indices" : [ - { - "names" : [ - ".fleet*" - ], - "privileges" : [ - "all" - ] - } - ] -} +KIBANA_HOST=http://localhost:5601 KIBANA_USERNAME=elastic KIBANA_PASSWORD=changeme ELASTICSEARCH_HOST=http://localhost:9200 ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme KIBANA_FLEET_SETUP=1 FLEET_SERVER_ENABLE=1 sudo ./elastic-agent container +``` + +This will start up Elastic Agent with fleet-server and directly enroll it. In addition Fleet is setup inside of Kibana. +## Running Elastic Agent with fleet-server in container +If you want to run Elastic Agent and fleet-server in a container but built Kibana from source, you have to add the following to your `config/kibana.dev.yml`: -POST /_security/user/kibana_fleet_system -{ - "password" : "changeme", - "roles" : [ "kibana_system", "kibana_fleet_system" ] -} ``` +server.host: 0.0.0.0 +``` + +This makes sure, Kibana is accessible from the container. Start Kibana as before but for Elasticsearch, run the following command: -Then configure your Kibana with ``` -elasticsearch.username: 'kibana_fleet_system' -elasticsearch.password: 'changeme' -xpack.fleet.agents.fleetServerEnabled: true -``` \ No newline at end of file +yarn es snapshot -E xpack.security.authc.api_key.enabled=true -E http.host=0.0.0.0 +``` + +This makes sure also Elasticsearch is accessible to the container. 
+ +Start the Elastic Agent with the following command: + +``` +docker run -e KIBANA_HOST=http://{YOUR-IP}:5601 -e KIBANA_USERNAME=elastic -e KIBANA_PASSWORD=changeme -e ELASTICSEARCH_HOST=http://{YOUR-IP}:9200 -e ELASTICSEARCH_USERNAME=elastic -e ELASTICSEARCH_PASSWORD=changeme -e KIBANA_FLEET_SETUP=1 -e FLEET_SERVER_ENABLE=1 -e FLEET_SERVER_INSECURE_HTTP=1 docker.elastic.co/beats/elastic-agent:8.0.0-SNAPSHOT +``` + +Replace {YOUR-IP} with the IP address of your machine. + +## fleet-server repo + +By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. + + +## Compatbility and upgrades + +Fleet server is always on the exact same version as Elastic Agent running fleet-server. Any Elastic Agent enrolling into a fleet-server must be the same version or older. Fleet-server communicates with Elasticsearch. Elasticsearch must be on the same version or newer. For Kibana it is assumed it is on the same version as Elasticsearch. With this the compatibility looks as following: + +``` +Elastic Agent <= Elastic Agent with fleet-server) <= Elasticsearch / Kibana +``` + +There might be differences on the bugfix version. + +If an upgrade is done, Elasticsearch / Kibana have to be upgraded first, then Elastic Agent with fleet-server and last the Elastic Agents. + +## MacOSX Version + +The [golang-crossbuild](https://github.com/elastic/golang-crossbuild) produces images used for testing/building. +The `golang-crossbuild:1.16.X-darwin-debian10` images expects the minimum MacOSX version to be 10.14+. 
diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index 88ae83b61..8c6a53fd5 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -12,19 +12,23 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "github.com/elastic/go-elasticsearch/v8" "github.com/rs/zerolog/log" ) -const ( - kAPIKeyTTL = 5 * time.Second +var ( + ErrApiKeyNotEnabled = errors.New("APIKey not enabled") + ErrAgentCorrupted = errors.New("agent record corrupted") + ErrAgentInactive = errors.New("agent inactive") + ErrAgentIdentity = errors.New("agent header contains wrong identifier") ) -var ErrApiKeyNotEnabled = errors.New("APIKey not enabled") - -func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (*apikey.ApiKey, error) { +// This authenticates that the provided API key exists and is enabled. +// WARNING: This does not validate that the api key is valid for the Fleet Domain. +// An additional check must be executed to validate it is not a random api key. +func authApiKey(r *http.Request, bulker bulk.Bulk, c cache.Cache) (*apikey.ApiKey, error) { key, err := apikey.ExtractAPIKey(r) if err != nil { @@ -35,57 +39,123 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* return key, nil } + reqId := r.Header.Get(logger.HeaderRequestID) + start := time.Now() - info, err := key.Authenticate(r.Context(), client) + info, err := bulker.ApiKeyAuth(r.Context(), *key) if err != nil { - log.Error(). + log.Info(). Err(err). - Dur("tdiff", time.Since(start)). + Str(LogApiKeyId, key.Id). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("ApiKey fail authentication") return nil, err } log.Trace(). Str("id", key.Id). 
- Dur("tdiff", time.Since(start)). - Str("UserName", info.UserName). - Strs("Roles", info.Roles). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Str("userName", info.UserName). + Strs("roles", info.Roles). Bool("enabled", info.Enabled). RawJSON("meta", info.Metadata). Msg("ApiKey authenticated") - if info.Enabled { - c.SetApiKey(*key, kAPIKeyTTL) - } else { + c.SetApiKey(*key, info.Enabled) + if !info.Enabled { err = ErrApiKeyNotEnabled + log.Info(). + Err(err). + Str("id", key.Id). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("ApiKey not enabled") } return key, err } -func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) { +func authAgent(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) { + start := time.Now() + // authenticate - key, err := authApiKey(r, bulker.Client(), c) + key, err := authApiKey(r, bulker, c) if err != nil { return nil, err } + w := log.With(). + Str(LogAccessApiKeyId, key.Id). + Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)) + + if id != nil { + w = w.Str(LogAgentId, *id) + } + + zlog := w.Logger() + + authTime := time.Now() + + if authTime.Sub(start) > time.Second { + zlog.Debug(). + Int64(EcsEventDuration, authTime.Sub(start).Nanoseconds()). + Msg("authApiKey slow") + } + agent, err := findAgentByApiKeyId(r.Context(), bulker, key.Id) if err != nil { return nil, err } - // validate key alignment + if agent.Agent == nil { + zlog.Warn(). + Err(ErrAgentCorrupted). + Msg("agent record does not contain required metadata section") + return nil, ErrAgentCorrupted + } + + findTime := time.Now() + + if findTime.Sub(authTime) > time.Second { + zlog.Debug(). + Int64(EcsEventDuration, findTime.Sub(authTime).Nanoseconds()). 
+ Msg("findAgentByApiKeyId slow") + } + + // validate that the Access ApiKey identifier stored in the agent's record + // is in alignment when the authenticated key provided on this transaction if agent.AccessApiKeyId != key.Id { - log.Debug(). + zlog.Warn(). Err(ErrAgentCorrupted). - Interface("agent", &agent). - Str("key.Id", key.Id). - Msg("agent id mismatch") + Str("agent.AccessApiKeyId", agent.AccessApiKeyId). + Msg("agent access ApiKey id mismatch agent record") return nil, ErrAgentCorrupted } + // validate that the id in the header is equal to the agent id record + if id != nil && *id != agent.Agent.Id { + zlog.Warn(). + Err(ErrAgentIdentity). + Str("agent.Agent.Id", agent.Agent.Id). + Msg("agent id mismatch against http header") + return nil, ErrAgentIdentity + } + + // validate active, an api key can be valid for an inactive agent record + // if it is in our cache and has not timed out. + if !agent.Active { + zlog.Info(). + Err(ErrAgentInactive). + Msg("agent record inactive") + + // Update the cache to mark the api key id associated with this agent as not enabled + c.SetApiKey(*key, false) + return nil, ErrAgentInactive + } + return agent, nil } diff --git a/cmd/fleet/bulkCheckin.go b/cmd/fleet/bulkCheckin.go deleted file mode 100644 index 17496da15..000000000 --- a/cmd/fleet/bulkCheckin.go +++ /dev/null @@ -1,122 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "context" - "encoding/json" - "sync" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" - - "github.com/rs/zerolog/log" -) - -const kBulkCheckinFlushInterval = 10 * time.Second - -type PendingData struct { - fields saved.Fields - seqNo int64 -} - -type BulkCheckin struct { - bulker bulk.Bulk - mut sync.Mutex - pending map[string]PendingData -} - -func NewBulkCheckin(bulker bulk.Bulk) *BulkCheckin { - return &BulkCheckin{ - bulker: bulker, - pending: make(map[string]PendingData), - } -} - -func (bc *BulkCheckin) CheckIn(id string, fields saved.Fields, seqno int64) error { - - if fields == nil { - fields = make(saved.Fields) - } - - timeNow := time.Now().UTC().Format(time.RFC3339) - fields[FieldLastCheckin] = timeNow - - bc.mut.Lock() - bc.pending[id] = PendingData{fields, seqno} - bc.mut.Unlock() - return nil -} - -func (bc *BulkCheckin) Run(ctx context.Context, sv saved.CRUD) error { - - tick := time.NewTicker(kBulkCheckinFlushInterval) - - var err error -LOOP: - for { - select { - case <-tick.C: - if err = bc.flush(ctx, sv); err != nil { - log.Error().Err(err).Msg("Eat bulk checkin error; Keep on truckin'") - err = nil - } - - case <-ctx.Done(): - err = ctx.Err() - break LOOP - } - } - - return err -} - -func (bc *BulkCheckin) flush(ctx context.Context, sv saved.CRUD) error { - start := time.Now() - - bc.mut.Lock() - pending := bc.pending - bc.pending = make(map[string]PendingData, len(pending)) - bc.mut.Unlock() - - if len(pending) == 0 { - return nil - } - - updates := make([]bulk.BulkOp, 0, len(pending)) - - for id, pendingData := range pending { - doc := pendingData.fields - doc[dl.FieldUpdatedAt] = time.Now().UTC().Format(time.RFC3339) - if pendingData.seqNo >= 0 { - doc[dl.FieldActionSeqNo] = pendingData.seqNo - } - - source, err := json.Marshal(map[string]interface{}{ - "doc": doc, - }) 
- - if err != nil { - return err - } - - updates = append(updates, bulk.BulkOp{ - Id: id, - Body: source, - Index: dl.FleetAgents, - }) - } - - err := bc.bulker.MUpdate(ctx, updates, bulk.WithRefresh()) - log.Debug(). - Err(err). - Dur("rtt", time.Since(start)). - Int("cnt", len(updates)). - Msg("Flush updates") - - return err -} diff --git a/cmd/fleet/dsl.go b/cmd/fleet/dsl.go deleted file mode 100644 index b11c215b8..000000000 --- a/cmd/fleet/dsl.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" -) - -const ( - kTmplApiKeyField = "ApiKeyId" - kTmplAgentIdField = "AgentIdList" -) - -var agentActionQueryTmpl = genAgentActionQueryTemplate() - -func genAgentActionQueryTemplate() *dsl.Tmpl { - tmpl := dsl.NewTmpl() - token := tmpl.Bind(kTmplAgentIdField) - - root := saved.NewQuery(AGENT_ACTION_SAVED_OBJECT_TYPE) - - fieldSentAt := saved.ScopeField(AGENT_ACTION_SAVED_OBJECT_TYPE, "sent_at") - fieldAgentId := saved.ScopeField(AGENT_ACTION_SAVED_OBJECT_TYPE, "agent_id") - - root.Query().Bool().Must().Terms(fieldAgentId, token, nil) - root.Query().Bool().MustNot().Exists(fieldSentAt) - - if err := tmpl.Resolve(root); err != nil { - panic(err) - } - - return tmpl -} diff --git a/cmd/fleet/error.go b/cmd/fleet/error.go new file mode 100644 index 000000000..375521516 --- /dev/null +++ b/cmd/fleet/error.go @@ -0,0 +1,165 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "context" + "encoding/json" + "net/http" + "os" + + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/pkg/errors" + "github.com/rs/zerolog" +) + +// Alias logger constants +const ( + EcsHttpRequestId = logger.EcsHttpRequestId + EcsEventDuration = logger.EcsEventDuration + EcsHttpResponseCode = logger.EcsHttpResponseCode + EcsHttpResponseBodyBytes = logger.EcsHttpResponseBodyBytes + + LogApiKeyId = logger.ApiKeyId + LogPolicyId = logger.PolicyId + LogAgentId = logger.AgentId + LogEnrollApiKeyId = logger.EnrollApiKeyId + LogAccessApiKeyId = logger.AccessApiKeyId + LogDefaultOutputApiKeyId = logger.DefaultOutputApiKeyId +) + +type errResp struct { + StatusCode int `json:"statusCode"` + Error string `json:"error"` + Message string `json:"message,omitempty"` + Level zerolog.Level `json:"-"` +} + +func NewErrorResp(err error) errResp { + + errTable := []struct { + target error + meta errResp + }{ + { + ErrAgentNotFound, + errResp{ + http.StatusNotFound, + "AgentNotFound", + "agent could not be found", + zerolog.WarnLevel, + }, + }, + { + limit.ErrRateLimit, + errResp{ + http.StatusTooManyRequests, + "RateLimit", + "exceeded the rate limit", + zerolog.DebugLevel, + }, + }, + { + limit.ErrMaxLimit, + errResp{ + http.StatusTooManyRequests, + "MaxLimit", + "exceeded the max limit", + zerolog.DebugLevel, + }, + }, + { + ErrApiKeyNotEnabled, + errResp{ + http.StatusUnauthorized, + "Unauthorized", + "ApiKey not enabled", + zerolog.InfoLevel, + }, + }, + { + context.Canceled, + errResp{ + http.StatusServiceUnavailable, + "ServiceUnavailable", + "server is stopping", + zerolog.DebugLevel, + }, + }, + { + ErrInvalidUserAgent, + errResp{ + http.StatusBadRequest, + "InvalidUserAgent", + "user-agent is invalid", + zerolog.InfoLevel, + }, + }, + { + ErrUnsupportedVersion, + errResp{ + 
http.StatusBadRequest, + "UnsupportedVersion", + "version is not supported", + zerolog.InfoLevel, + }, + }, + { + dl.ErrNotFound, + errResp{ + http.StatusNotFound, + "NotFound", + "not found", + zerolog.WarnLevel, + }, + }, + { + ErrorThrottle, + errResp{ + http.StatusTooManyRequests, + "TooManyRequests", + "too many requests", + zerolog.DebugLevel, + }, + }, + { + os.ErrDeadlineExceeded, + errResp{ + http.StatusRequestTimeout, + "RequestTimeout", + "timeout on request", + zerolog.InfoLevel, + }, + }, + } + + for _, e := range errTable { + if errors.Is(err, e.target) { + return e.meta + } + } + + // Default + return errResp{ + StatusCode: http.StatusBadRequest, + Error: "BadRequest", + Level: zerolog.InfoLevel, + } +} + +func (er errResp) Write(w http.ResponseWriter) error { + data, err := json.Marshal(&er) + if err != nil { + return err + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(er.StatusCode) + _, err = w.Write(data) + return err +} diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index dc8967903..e2a5b2a2b 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -5,84 +5,164 @@ package fleet import ( + "bytes" "context" "encoding/json" - "errors" "io/ioutil" "net/http" + "strconv" "strings" "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/policy" + "github.com/pkg/errors" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) var ErrEventAgentIdMismatch = errors.New("event 
agentId mismatch") +type AckT struct { + cfg *config.Server + limit *limit.Limiter + bulk bulk.Bulk + cache cache.Cache +} + +func NewAckT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *AckT { + log.Info(). + Interface("limits", cfg.Limits.AckLimit). + Msg("Setting config ack_limits") + + return &AckT{ + cfg: cfg, + bulk: bulker, + cache: cache, + limit: limit.NewLimiter(&cfg.Limits.AckLimit), + } +} + func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + start := time.Now() + id := ps.ByName("id") - err := _handleAcks(w, r, id, rt.ct.bulker, rt.ct.cache) + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). + Str(LogAgentId, id). + Str(EcsHttpRequestId, reqId). + Logger() + + err := rt.ack.handleAcks(&zlog, w, r, id) if err != nil { - code := http.StatusBadRequest - // Don't log connection drops - if err != context.Canceled { - log.Error().Err(err).Int("code", code).Msg("Fail ACK") - } + cntAcks.IncError(err) + resp := NewErrorResp(err) + + zlog.WithLevel(resp.Level). + Err(err). + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
+ Msg("fail ACK") - http.Error(w, err.Error(), code) + if err := resp.Write(w); err != nil { + zlog.Error().Err(err).Msg("fail writing error response") + } } } -// TODO: Handle UPGRADE -func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) error { - agent, err := authAgent(r, id, bulker, c) +func (ack *AckT) handleAcks(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request, id string) error { + limitF, err := ack.limit.Acquire() if err != nil { return err } + defer limitF() - raw, err := ioutil.ReadAll(r.Body) + agent, err := authAgent(r, &id, ack.bulk, ack.cache) if err != nil { return err } + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + + // Metrics; serenity now. + dfunc := cntAcks.IncStart() + defer dfunc() + + return ack.processRequest(*zlog, w, r, agent) +} + +func (ack *AckT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, agent *model.Agent) error { + + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if ack.cfg.Limits.AckLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, ack.cfg.Limits.AckLimit.MaxBody) + } + + raw, err := ioutil.ReadAll(body) + if err != nil { + return errors.Wrap(err, "handleAcks read body") + } + + cntAcks.bodyIn.Add(uint64(len(raw))) + var req AckRequest if err := json.Unmarshal(raw, &req); err != nil { - return err + return errors.Wrap(err, "handleAcks unmarshal") } - log.Trace().RawJSON("raw", raw).Msg("Ack request") + zlog.Trace().RawJSON("raw", raw).Msg("Ack request") + + zlog = zlog.With().Int("nEvents", len(req.Events)).Logger() - if err = _handleAckEvents(r.Context(), agent, req.Events, bulker, c); err != nil { + if err = ack.handleAckEvents(r.Context(), zlog, agent, req.Events); err != nil { return err } - // TODO: flesh this out resp 
:= AckResponse{"acks"} data, err := json.Marshal(&resp) if err != nil { - return err + return errors.Wrap(err, "handleAcks marshal response") } - if _, err = w.Write(data); err != nil { + var nWritten int + if nWritten, err = w.Write(data); err != nil { return err } + cntAcks.bodyOut.Add(uint64(nWritten)) + return nil } -func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, bulker bulk.Bulk, c cache.Cache) error { +func (ack *AckT) handleAckEvents(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, events []Event) error { var policyAcks []string var unenroll bool - for _, ev := range events { + for n, ev := range events { + zlog.Info(). + Str("actionType", ev.Type). + Str("actionSubType", ev.SubType). + Str("actionId", ev.ActionId). + Str("timestamp", ev.Timestamp). + Int("n", n). + Msg("ack event") + if ev.AgentId != "" && ev.AgentId != agent.Id { return ErrEventAgentIdMismatch } @@ -94,52 +174,60 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b continue } - action, ok := c.GetAction(ev.ActionId) + action, ok := ack.cache.GetAction(ev.ActionId) if !ok { - actions, err := dl.FindAction(ctx, bulker, ev.ActionId) + actions, err := dl.FindAction(ctx, ack.bulk, ev.ActionId) if err != nil { - return err + return errors.Wrap(err, "find actions") } if len(actions) == 0 { return errors.New("no matching action") } action = actions[0] - c.SetAction(action) + ack.cache.SetAction(action) } acr := model.ActionResult{ - ActionId: ev.ActionId, - AgentId: agent.Id, - Data: ev.Data, - Error: ev.Error, + ActionId: ev.ActionId, + AgentId: agent.Id, + StartedAt: ev.StartedAt, + CompletedAt: ev.CompletedAt, + ActionData: ev.ActionData, + ActionResponse: ev.ActionResponse, + Data: ev.Data, + Error: ev.Error, } - if _, err := dl.CreateActionResult(ctx, bulker, acr); err != nil { - return err + if _, err := dl.CreateActionResult(ctx, ack.bulk, acr); err != nil { + return errors.Wrap(err, "create action result") } - if 
ev.Error == "" && action.Type == TypeUnenroll { - unenroll = true + if ev.Error == "" { + if action.Type == TypeUnenroll { + unenroll = true + } else if action.Type == TypeUpgrade { + if err := ack.handleUpgrade(ctx, zlog, agent); err != nil { + return err + } + } } } if len(policyAcks) > 0 { - if err := _handlePolicyChange(ctx, bulker, agent, policyAcks...); err != nil { + if err := ack.handlePolicyChange(ctx, zlog, agent, policyAcks...); err != nil { return err } } if unenroll { - if err := _handleUnenroll(ctx, bulker, agent); err != nil { + if err := ack.handleUnenroll(ctx, zlog, agent); err != nil { return err } } - // TODO: handle UPGRADE - return nil } -func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Agent, actionIds ...string) error { +func (ack *AckT) handlePolicyChange(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, actionIds ...string) error { // If more than one, pick the winner; // 0) Correct policy id // 1) Highest revision/coordinator number @@ -149,6 +237,16 @@ func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Age currCoord := agent.PolicyCoordinatorIdx for _, a := range actionIds { rev, ok := policy.RevisionFromString(a) + + zlog.Debug(). + Str("agent.policyId", agent.PolicyId). + Int64("agent.revisionIdx", currRev). + Int64("agent.coordinatorIdx", currCoord). + Str("rev.policyId", rev.PolicyId). + Int64("rev.revisionIdx", rev.RevisionIdx). + Int64("rev.coordinatorIdx", rev.CoordinatorIdx). 
+ Msg("ack policy revision") + if ok && rev.PolicyId == agent.PolicyId && (rev.RevisionIdx > currRev || (rev.RevisionIdx == currRev && rev.CoordinatorIdx > currCoord)) { found = true @@ -157,56 +255,134 @@ func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Age } } - if found { - updates := make([]bulk.BulkOp, 0, 1) - fields := map[string]interface{}{ - dl.FieldPolicyRevisionIdx: currRev, - dl.FieldPolicyCoordinatorIdx: currCoord, - } - fields[dl.FieldUpdatedAt] = time.Now().UTC().Format(time.RFC3339) + if !found { + return nil + } - source, err := json.Marshal(map[string]interface{}{ - "doc": fields, - }) - if err != nil { - return err - } + body := makeUpdatePolicyBody( + agent.PolicyId, + currRev, + currCoord, + ) + + err := ack.bulk.Update( + ctx, + dl.FleetAgents, + agent.Id, + body, + bulk.WithRefresh(), + bulk.WithRetryOnConflict(3), + ) + + zlog.Info().Err(err). + Str(LogPolicyId, agent.PolicyId). + Int64("policyRevision", currRev). + Int64("policyCoordinator", currCoord). 
+ Msg("ack policy") + + return errors.Wrap(err, "handlePolicyChange update") +} - updates = append(updates, bulk.BulkOp{ - Id: agent.Id, - Body: source, - Index: dl.FleetAgents, - }) +func (ack *AckT) handleUnenroll(ctx context.Context, zlog zerolog.Logger, agent *model.Agent) error { + apiKeys := _getAPIKeyIDs(agent) + if len(apiKeys) > 0 { + zlog = zlog.With().Strs(LogApiKeyId, apiKeys).Logger() - err = bulker.MUpdate(ctx, updates, bulk.WithRefresh()) - if err != nil { - return err + if err := ack.bulk.ApiKeyInvalidate(ctx, apiKeys...); err != nil { + return errors.Wrap(err, "handleUnenroll invalidate apikey") } } + now := time.Now().UTC().Format(time.RFC3339) + doc := bulk.UpdateFields{ + dl.FieldActive: false, + dl.FieldUnenrolledAt: now, + dl.FieldUpdatedAt: now, + } + + body, err := doc.Marshal() + if err != nil { + return errors.Wrap(err, "handleUnenroll marshal") + } + + if err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()); err != nil { + return errors.Wrap(err, "handleUnenroll update") + } + + zlog.Info().Msg("ack unenroll") return nil } -func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) error { - updates := make([]bulk.BulkOp, 0, 1) +func (ack *AckT) handleUpgrade(ctx context.Context, zlog zerolog.Logger, agent *model.Agent) error { + now := time.Now().UTC().Format(time.RFC3339) - fields := map[string]interface{}{ - dl.FieldUnenrolledAt: now, - dl.FieldUpdatedAt: now, + doc := bulk.UpdateFields{ + dl.FieldUpgradeStartedAt: nil, + dl.FieldUpgradedAt: now, } - source, err := json.Marshal(map[string]interface{}{ - "doc": fields, - }) + body, err := doc.Marshal() if err != nil { - return err + return errors.Wrap(err, "handleUpgrade marshal") } - updates = append(updates, bulk.BulkOp{ - Id: agent.Id, - Body: source, - Index: dl.FleetAgents, - }) + if err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()); err != nil { + return errors.Wrap(err, "handleUpgrade update") + } + + 
zlog.Info(). + Str("lastReportedVersion", agent.Agent.Version). + Str("upgradedAt", now). + Msg("ack upgrade") + + return nil +} + +func _getAPIKeyIDs(agent *model.Agent) []string { + keys := make([]string, 0, 1) + if agent.AccessApiKeyId != "" { + keys = append(keys, agent.AccessApiKeyId) + } + if agent.DefaultApiKeyId != "" { + keys = append(keys, agent.DefaultApiKeyId) + } + return keys +} - return bulker.MUpdate(ctx, updates, bulk.WithRefresh()) +// Generate an update script that validates that the policy_id +// has not changed underneath us by an upstream process (Kibana or otherwise). +// We have a race condition where a user could have assigned a new policy to +// an agent while we were busy updating the old one. A blind update to the +// agent record without a check could set the revision and coordIdx for the wrong +// policy. This script should be coupled with a "retry_on_conflict" parameter +// to allow for *other* changes to the agent record while we running the script. +// (For example, say the background bulk check-in timestamp update task fires) +// +// WARNING: This assumes the input data is sanitized. + +const kUpdatePolicyPrefix = `{"script":{"lang":"painless","source":"if (ctx._source.policy_id == params.id) {ctx._source.` + + dl.FieldPolicyRevisionIdx + + ` = params.rev;ctx._source.` + + dl.FieldPolicyCoordinatorIdx + + `= params.coord;ctx._source.` + + dl.FieldUpdatedAt + + ` = params.ts;} else {ctx.op = \"noop\";}","params": {"id":"` + +func makeUpdatePolicyBody(policyId string, newRev, coordIdx int64) []byte { + + var buf bytes.Buffer + buf.Grow(384) + + // Not pretty, but fast. 
+ buf.WriteString(kUpdatePolicyPrefix) + buf.WriteString(policyId) + buf.WriteString(`","rev":`) + buf.WriteString(strconv.FormatInt(newRev, 10)) + buf.WriteString(`,"coord":`) + buf.WriteString(strconv.FormatInt(coordIdx, 10)) + buf.WriteString(`,"ts":"`) + buf.WriteString(time.Now().UTC().Format(time.RFC3339)) + buf.WriteString(`"}}}`) + + return buf.Bytes() } diff --git a/cmd/fleet/handleAck_test.go b/cmd/fleet/handleAck_test.go new file mode 100644 index 000000000..ce6135406 --- /dev/null +++ b/cmd/fleet/handleAck_test.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "testing" + + "encoding/json" +) + +func BenchmarkMakeUpdatePolicyBody(b *testing.B) { + b.ReportAllocs() + + const policyId = "ed110be4-c2a0-42b8-adc0-94c2f0569207" + const newRev = 2 + const coord = 1 + + for n := 0; n < b.N; n++ { + makeUpdatePolicyBody(policyId, newRev, coord) + } +} + +func TestMakeUpdatePolicyBody(t *testing.T) { + + const policyId = "ed110be4-c2a0-42b8-adc0-94c2f0569207" + const newRev = 2 + const coord = 1 + + data := makeUpdatePolicyBody(policyId, newRev, coord) + + var i interface{} + err := json.Unmarshal(data, &i) + + if err != nil { + t.Fatal(err) + } +} diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go new file mode 100644 index 000000000..64f8b9f7d --- /dev/null +++ b/cmd/fleet/handleArtifacts.go @@ -0,0 +1,299 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/base64" + "encoding/hex" + "encoding/json" + "io" + "net/http" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/throttle" + + "github.com/julienschmidt/httprouter" + "github.com/pkg/errors" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +const ( + defaultMaxParallel = 8 // TODO: configurable + defaultThrottleTTL = time.Minute // TODO: configurable +) + +var ( + ErrorThrottle = errors.New("cannot acquire throttle token") + ErrorBadSha2 = errors.New("malformed sha256") + ErrorRecord = errors.New("artifact record mismatch") + ErrorMismatchSha2 = errors.New("mismatched sha256") +) + +type ArtifactT struct { + bulker bulk.Bulk + cache cache.Cache + esThrottle *throttle.Throttle + limit *limit.Limiter +} + +func NewArtifactT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *ArtifactT { + log.Info(). + Interface("limits", cfg.Limits.ArtifactLimit). + Int("maxParallel", defaultMaxParallel). + Msg("Artifact install limits") + + return &ArtifactT{ + bulker: bulker, + cache: cache, + limit: limit.NewLimiter(&cfg.Limits.ArtifactLimit), + esThrottle: throttle.NewThrottle(defaultMaxParallel), + } +} + +func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + start := time.Now() + + var ( + id = ps.ByName("id") // Identifier in the artifact record + sha2 = ps.ByName("sha2") // DecodedSha256 in the artifact record + ) + + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). 
+ Str(LogAgentId, id). + Str(EcsHttpRequestId, reqId). + Str("sha2", sha2). + Str("remoteAddr", r.RemoteAddr). + Logger() + + rdr, err := rt.at.handleArtifacts(&zlog, r, id, sha2) + + var nWritten int64 + if err == nil { + nWritten, err = io.Copy(w, rdr) + zlog.Trace(). + Err(err). + Int64(EcsHttpResponseBodyBytes, nWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("Response sent") + + cntArtifacts.bodyOut.Add(uint64(nWritten)) + } + + if err != nil { + cntArtifacts.IncError(err) + resp := NewErrorResp(err) + + zlog.WithLevel(resp.Level). + Err(err). + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsHttpResponseBodyBytes, nWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail artifact") + + if err := resp.Write(w); err != nil { + zlog.Error().Err(err).Msg("fail writing error response") + } + } +} + +func (at ArtifactT) handleArtifacts(zlog *zerolog.Logger, r *http.Request, id, sha2 string) (io.Reader, error) { + limitF, err := at.limit.Acquire() + if err != nil { + return nil, err + } + defer limitF() + + // Authenticate the APIKey; retrieve agent record. + // Note: This is going to be a bit slow even if we hit the cache on the api key. + // In order to validate that the agent still has that api key, we fetch the agent record from elastic. + agent, err := authAgent(r, nil, at.bulker, at.cache) + if err != nil { + return nil, err + } + + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + + // Metrics; serenity now. 
+ dfunc := cntArtifacts.IncStart() + defer dfunc() + + return at.processRequest(r.Context(), *zlog, agent, id, sha2) +} + +type artHandler struct { + zlog zerolog.Logger + bulker bulk.Bulk + c cache.Cache +} + +func (at ArtifactT) processRequest(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, id, sha2 string) (io.Reader, error) { + + // Input validation + if err := validateSha2String(sha2); err != nil { + return nil, err + } + + // Determine whether the agent should have access to this artifact + if err := at.authorizeArtifact(ctx, agent, id, sha2); err != nil { + zlog.Warn().Err(err).Msg("Unauthorized GET on artifact") + return nil, err + } + + // Grab artifact, whether from cache or elastic. + artifact, err := at.getArtifact(ctx, zlog, id, sha2) + if err != nil { + return nil, err + } + + // Sanity check; just in case something underneath is misbehaving + if artifact.Identifier != id || artifact.DecodedSha256 != sha2 { + err = ErrorRecord + zlog.Info(). + Err(err). + Str("artifact_id", artifact.Identifier). + Str("artifact_sha2", artifact.DecodedSha256). + Msg("Identifier mismatch on url") + return nil, err + } + + zlog.Debug(). + Int("sz", len(artifact.Body)). + Int64("decodedSz", artifact.DecodedSize). + Str("compression", artifact.CompressionAlgorithm). + Str("encryption", artifact.EncryptionAlgorithm). + Str("created", artifact.Created). + Msg("Artifact GET") + + // Write the payload + rdr := bytes.NewReader(artifact.Body) + return rdr, nil +} + +// TODO: Pull the policy record for this agent and validate that the +// requested artifact is assigned to this policy. This will prevent +// agents from retrieving artifacts that they do not have access to. +// Note that this is racy, the policy could have changed to allow an +// artifact before this instantiation of FleetServer has its local +// copy updated. Take the race conditions into consideration. +// +// Initial implementation is dependent on security by obscurity; ie. 
+// it should be difficult for an attacker to guess a guid. +func (at ArtifactT) authorizeArtifact(ctx context.Context, agent *model.Agent, ident, sha2 string) error { + return nil // TODO +} + +// Return artifact from cache by sha2 or fetch directly from Elastic. +// Update cache on successful retrieval from Elastic. +func (at ArtifactT) getArtifact(ctx context.Context, zlog zerolog.Logger, ident, sha2 string) (*model.Artifact, error) { + + // Check the cache; return immediately if found. + if artifact, ok := at.cache.GetArtifact(ident, sha2); ok { + return &artifact, nil + } + + // Fetch the artifact from elastic + art, err := at.fetchArtifact(ctx, zlog, ident, sha2) + + if err != nil { + zlog.Info().Err(err).Msg("Fail retrieve artifact") + return nil, err + } + + // The 'Body' field type is Raw; extract to string. + var srcPayload string + if err = json.Unmarshal(art.Body, &srcPayload); err != nil { + zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") + return nil, err + } + + // Artifact is stored base64 encoded in ElasticSearch. + // Base64 decode the payload before putting in cache + // to avoid having to decode on each cache hit. + dstPayload, err := base64.StdEncoding.DecodeString(srcPayload) + if err != nil { + zlog.Error().Err(err).Msg("Fail base64 decode artifact") + return nil, err + } + + // Validate the sha256 hash; this is just good hygiene. + if err = validateSha2Data(dstPayload, art.EncodedSha256); err != nil { + zlog.Error().Err(err).Msg("Fail sha2 hash validation") + return nil, err + } + + // Reassign decoded payload before adding to cache, avoid base64 decode on cache hit. + art.Body = dstPayload + + // Update the cache. + at.cache.SetArtifact(*art) + + return art, nil +} + +// Attempt to fetch the artifact from Elastic +// TODO: Design a mechanism to mitigate a DDOS attack on bogus hashes. 
+// Perhaps have a cache of the most recently used hashes available, and items that aren't +// in the cache can do a lookup but throttle as below. We could update the cache every 10m or so. +func (at ArtifactT) fetchArtifact(ctx context.Context, zlog zerolog.Logger, ident, sha2 string) (*model.Artifact, error) { + // Throttle prevents more than N outstanding requests to elastic globally and per sha2. + if token := at.esThrottle.Acquire(sha2, defaultThrottleTTL); token == nil { + return nil, ErrorThrottle + } else { + defer token.Release() + } + + start := time.Now() + artifact, err := dl.FindArtifact(ctx, at.bulker, ident, sha2) + + zlog.Info(). + Err(err). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fetch artifact") + + return artifact, errors.Wrap(err, "fetchArtifact") +} + +func validateSha2String(sha2 string) error { + + if len(sha2) != 64 { + return ErrorBadSha2 + } + + if _, err := hex.DecodeString(sha2); err != nil { + return ErrorBadSha2 + } + + return nil +} + +func validateSha2Data(data []byte, sha2 string) error { + src, err := hex.DecodeString(sha2) + if err != nil { + return errors.Wrap(err, "sha2 hex decode") + } + + sum := sha256.Sum256(data) + if !bytes.Equal(sum[:], src) { + return ErrorMismatchSha2 + } + + return nil +} diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 10c7a74a5..86b628298 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -6,9 +6,11 @@ package fleet import ( "bytes" + "compress/flate" + "compress/gzip" "context" "encoding/json" - "errors" + "math/rand" "net/http" "reflect" "time" @@ -16,67 +18,106 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/action" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" 
+ "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" + "github.com/elastic/fleet-server/v7/internal/pkg/smap" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" + "github.com/miolini/datacounter" + "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) var ( - ErrAgentNotFound = errors.New("agent not found") - ErrAgentCorrupted = errors.New("agent record corrupted") + ErrAgentNotFound = errors.New("agent not found") + ErrNoOutputPerms = errors.New("output permission sections not found") + ErrNoPolicyOutput = errors.New("output section not found") + ErrFailInjectApiKey = errors.New("fail inject api key") +) - kCheckinTimeout = 30 * time.Second - kLongPollTimeout = 300 * time.Second // 5m +const ( + kEncodingGzip = "gzip" ) func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - // TODO: Consider rate limit here + start := time.Now() id := ps.ByName("id") - err := rt.ct._handleCheckin(w, r, id, rt.bulker) + + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). + Str(LogAgentId, id). + Str(EcsHttpRequestId, reqId). + Logger() + + err := rt.ct.handleCheckin(&zlog, w, r, id) if err != nil { - code := http.StatusBadRequest - if err == ErrAgentNotFound { - code = http.StatusNotFound + cntCheckin.IncError(err) + resp := NewErrorResp(err) + + // Log this as warn for visibility that limit has been reached. + // This allows customers to tune the configuration on detection of threshold. 
+ if errors.Is(err, limit.ErrMaxLimit) { + resp.Level = zerolog.WarnLevel } - // Don't log connection drops - if err != context.Canceled { - log.Error().Err(err).Str("id", id).Int("code", code).Msg("Fail checkin") + zlog.WithLevel(resp.Level). + Err(err). + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail checkin") + + if err := resp.Write(w); err != nil { + zlog.Error().Err(err).Msg("fail writing error response") } - http.Error(w, err.Error(), code) } } type CheckinT struct { - cfg *config.Config + verCon version.Constraints + cfg *config.Server cache cache.Cache - bc *BulkCheckin + bc *checkin.Bulk pm policy.Monitor gcp monitor.GlobalCheckpointProvider ad *action.Dispatcher tr *action.TokenResolver bulker bulk.Bulk + limit *limit.Limiter } func NewCheckinT( - cfg *config.Config, + verCon version.Constraints, + cfg *config.Server, c cache.Cache, - bc *BulkCheckin, + bc *checkin.Bulk, pm policy.Monitor, gcp monitor.GlobalCheckpointProvider, ad *action.Dispatcher, tr *action.TokenResolver, bulker bulk.Bulk, ) *CheckinT { - return &CheckinT{ + + log.Info(). + Interface("limits", cfg.Limits.CheckinLimit). + Dur("long_poll_timeout", cfg.Timeouts.CheckinLongPoll). + Dur("long_poll_timestamp", cfg.Timeouts.CheckinTimestamp). + Dur("long_poll_jitter", cfg.Timeouts.CheckinJitter). 
+ Msg("Checkin install limits") + + ct := &CheckinT{ + verCon: verCon, cfg: cfg, cache: c, bc: bc, @@ -84,40 +125,83 @@ func NewCheckinT( gcp: gcp, ad: ad, tr: tr, + limit: limit.NewLimiter(&cfg.Limits.CheckinLimit), bulker: bulker, } + + return ct } -func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { +func (ct *CheckinT) handleCheckin(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request, id string) error { + + start := time.Now() + + limitF, err := ct.limit.Acquire() + if err != nil { + return err + } + defer limitF() - agent, err := authAgent(r, id, ct.bulker, ct.cache) + agent, err := authAgent(r, &id, ct.bulker, ct.cache) if err != nil { return err } + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + + ver, err := validateUserAgent(*zlog, r, ct.verCon) + if err != nil { + return err + } + + // Safely check if the agent version is different, return empty string otherwise + newVer := agent.CheckDifferentVersion(ver) + + // Metrics; serenity now. 
+ dfunc := cntCheckin.IncStart() + defer dfunc() + + return ct.processRequest(*zlog, w, r, start, agent, newVer) +} + +func (ct *CheckinT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, start time.Time, agent *model.Agent, ver string) error { + ctx := r.Context() - // Interpret request; TODO: defend overflow, slow roll + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if ct.cfg.Limits.CheckinLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, ct.cfg.Limits.CheckinLimit.MaxBody) + } + + readCounter := datacounter.NewReaderCounter(body) + var req CheckinRequest - decoder := json.NewDecoder(r.Body) + decoder := json.NewDecoder(readCounter) if err := decoder.Decode(&req); err != nil { - return err + return errors.Wrap(err, "decode checkin request") } + cntCheckin.bodyIn.Add(readCounter.Count()) + // Compare local_metadata content and update if different - fields, err := parseMeta(agent, &req) + rawMeta, err := parseMeta(zlog, agent, &req) if err != nil { return err } // Resolve AckToken from request, fallback on the agent record - seqno, err := ct.resolveSeqNo(ctx, req, agent) + seqno, err := ct.resolveSeqNo(ctx, zlog, req, agent) if err != nil { return err } - // Subsribe to actions dispatcher + // Subscribe to actions dispatcher aSub := ct.ad.Subscribe(agent.Id, seqno) defer ct.ad.Unsubscribe(aSub) actCh := aSub.Ch() @@ -125,20 +209,32 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st // Subscribe to policy manager for changes on PolicyId > policyRev sub, err := ct.pm.Subscribe(agent.Id, agent.PolicyId, agent.PolicyRevisionIdx, agent.PolicyCoordinatorIdx) if err != nil { - return err + return errors.Wrap(err, "subscribe policy monitor") } defer ct.pm.Unsubscribe(sub) // Update check-in timestamp on timeout - tick := time.NewTicker(kCheckinTimeout) + tick := time.NewTicker(ct.cfg.Timeouts.CheckinTimestamp) defer tick.Stop() + setupDuration 
:= time.Since(start) + pollDuration, jitter := calcPollDuration(zlog, ct.cfg, setupDuration) + + zlog.Debug(). + Str("status", req.Status). + Str("seqNo", seqno.String()). + Dur("setupDuration", setupDuration). + Dur("jitter", jitter). + Dur("pollDuration", pollDuration). + Uint64("bodyCount", readCounter.Count()). + Msg("checkin start long poll") + // Chill out for for a bit. Long poll. - longPoll := time.NewTicker(kLongPollTimeout) + longPoll := time.NewTicker(pollDuration) defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed - ct.bc.CheckIn(agent.Id, fields, seqno) + ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno, ver) // Initial fetch for pending actions var ( @@ -165,44 +261,102 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st actions = append(actions, acs...) break LOOP case policy := <-sub.Output(): - actionResp, err := parsePolicy(ctx, bulker, agent.Id, policy) + actionResp, err := processPolicy(ctx, zlog, ct.bulker, agent.Id, policy) if err != nil { - return err + return errors.Wrap(err, "processPolicy") } actions = append(actions, *actionResp) break LOOP case <-longPoll.C: - log.Trace().Msg("Fire long poll") + zlog.Trace().Msg("fire long poll") break LOOP case <-tick.C: - ct.bc.CheckIn(agent.Id, nil, seqno) + ct.bc.CheckIn(agent.Id, req.Status, nil, nil, ver) } } } - // For now, empty response + for _, action := range actions { + zlog.Info(). + Str("ackToken", ackToken). + Str("createdAt", action.CreatedAt). + Str("id", action.Id). + Str("type", action.Type). + Str("inputType", action.InputType). + Int64("timeout", action.Timeout). 
+ Msg("Action delivered to agent on checkin") + } + resp := CheckinResponse{ AckToken: ackToken, Action: "checkin", Actions: actions, } - data, err := json.Marshal(&resp) + return ct.writeResponse(zlog, w, r, resp) +} + +func (ct *CheckinT) writeResponse(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, resp CheckinResponse) error { + + payload, err := json.Marshal(&resp) if err != nil { - return err + return errors.Wrap(err, "writeResponse marshal") } - if _, err = w.Write(data); err != nil { - return err + compressionLevel := ct.cfg.CompressionLevel + compressThreshold := ct.cfg.CompressionThresh + + if len(payload) > compressThreshold && compressionLevel != flate.NoCompression && acceptsEncoding(r, kEncodingGzip) { + + wrCounter := datacounter.NewWriterCounter(w) + + zipper, err := gzip.NewWriterLevel(wrCounter, compressionLevel) + if err != nil { + return errors.Wrap(err, "writeResponse new gzip") + } + + w.Header().Set("Content-Encoding", kEncodingGzip) + + if _, err = zipper.Write(payload); err != nil { + return errors.Wrap(err, "writeResponse gzip write") + } + + if err = zipper.Close(); err != nil { + err = errors.Wrap(err, "writeResponse gzip close") + } + + cntCheckin.bodyOut.Add(wrCounter.Count()) + + zlog.Trace(). + Err(err). + Int("lvl", compressionLevel). + Int("srcSz", len(payload)). + Uint64("dstSz", wrCounter.Count()). 
+ Msg("compressing checkin response") + } else { + var nWritten int + nWritten, err = w.Write(payload) + cntCheckin.bodyOut.Add(uint64(nWritten)) + + if err != nil { + err = errors.Wrap(err, "writeResponse payload") + } } - log.Trace().RawJSON("resp", data).Msg("Checkin response") + return err +} - return nil +func acceptsEncoding(r *http.Request, encoding string) bool { + for _, v := range r.Header.Values("Accept-Encoding") { + if v == encoding { + return true + } + } + return false } // Resolve AckToken from request, fallback on the agent record -func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent *model.Agent) (seqno int64, err error) { +func (ct *CheckinT) resolveSeqNo(ctx context.Context, zlog zerolog.Logger, req CheckinRequest, agent *model.Agent) (seqno sqn.SeqNo, err error) { // Resolve AckToken from request, fallback on the agent record ackToken := req.AckToken seqno = agent.ActionSeqNo @@ -212,26 +366,27 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent sn, err = ct.tr.Resolve(ctx, ackToken) if err != nil { if errors.Is(err, dl.ErrNotFound) { - log.Debug().Str("token", ackToken).Str("agent_id", agent.Id).Msg("Revision token not found") + zlog.Debug().Str("token", ackToken).Msg("revision token not found") err = nil } else { + err = errors.Wrap(err, "resolveSeqNo") return } } - seqno = sn + seqno = []int64{sn} } return seqno, nil } -func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno int64, agentId string) ([]model.Action, error) { - now := time.Now().UTC().Format(time.RFC3339) +func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno sqn.SeqNo, agentId string) ([]model.Action, error) { - return dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ - dl.FieldSeqNo: seqno, - dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint(), - dl.FieldExpiration: now, - dl.FieldAgents: []string{agentId}, - }) + actions, err := dl.FindAgentActions(ctx, ct.bulker, 
seqno, ct.gcp.GetCheckpoint(), agentId) + + if err != nil { + return nil, errors.Wrap(err, "fetchAgentPendingActions") + } + + return actions, err } func convertActions(agentId string, actions []model.Action) ([]ActionResp, string) { @@ -243,10 +398,11 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin respList = append(respList, ActionResp{ AgentId: agentId, CreatedAt: action.Timestamp, - Data: []byte(action.Data), + Data: action.Data, Id: action.ActionId, Type: action.Type, - InputId: action.InputId, + InputType: action.InputType, + Timeout: action.Timeout, }) } @@ -257,56 +413,95 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin return respList, ackToken } -func parsePolicy(ctx context.Context, bulker bulk.Bulk, agentId string, p model.Policy) (*ActionResp, error) { - // Need to inject the default api key into the object. So: - // 1) Deserialize the action - // 2) Lookup the DefaultApiKey in the save agent (we purposefully didn't decode it before) - // 3) If not there, generate and persist DefaultAPIKey - // 4) Inject default api key into structure - // 5) Re-serialize and return AgentResp structure - - var actionObj map[string]interface{} - if err := json.Unmarshal(p.Data, &actionObj); err != nil { - return nil, err +// A new policy exists for this agent. Perform the following: +// - Generate and update default ApiKey if roles have changed. +// - Rewrite the policy for delivery to the agent injecting the key material. +// +func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, agentId string, pp *policy.ParsedPolicy) (*ActionResp, error) { + + zlog = zlog.With(). + Str("ctx", "processPolicy"). + Int64("policyRevision", pp.Policy.RevisionIdx). + Int64("policyCoordinator", pp.Policy.CoordinatorIdx). + Str(LogPolicyId, pp.Policy.PolicyId). + Logger() + + // The parsed policy object contains a map of name->role with a precalculated sha2. 
+ if pp.Default.Role == nil { + zlog.Error().Str("name", pp.Default.Name).Msg("policy does not contain required output permission section") + return nil, ErrNoOutputPerms } - // Repull and decode the agent object - var agent model.Agent + // Repull and decode the agent object. Do not trust the cache. agent, err := dl.FindAgent(ctx, bulker, dl.QueryAgentByID, dl.FieldId, agentId) if err != nil { + zlog.Error().Err(err).Msg("fail find agent record") return nil, err } - if agent.DefaultApiKey == "" { - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, "default") + // Determine whether we need to generate a default output ApiKey. + // This is accomplished by comparing the sha2 hash stored in the agent + // record with the precalculated sha2 hash of the role. + needKey := true + switch { + case agent.DefaultApiKey == "": + zlog.Debug().Msg("must generate api key as default API key is not present") + case pp.Default.Role.Sha2 != agent.PolicyOutputPermissionsHash: + zlog.Debug().Msg("must generate api key as policy output permissions changed") + default: + needKey = false + zlog.Debug().Msg("policy output permissions are the same") + } + + if needKey { + zlog.Debug(). + RawJSON("roles", pp.Default.Role.Raw). + Str("oldHash", agent.PolicyOutputPermissionsHash). + Str("newHash", pp.Default.Role.Sha2). + Msg("Generating a new API key") + + defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker, agent.Id, pp.Default.Name, pp.Default.Role.Raw) if err != nil { + zlog.Error().Err(err).Msg("fail generate output key") return nil, err } - agent.DefaultApiKey = defaultOutputApiKey.Agent() - agent.DefaultApiKeyId = defaultOutputApiKey.Id - log.Info().Str("agentId", agentId).Msg("Rewriting full agent record to pick up default output key.") - if err = dl.IndexAgent(ctx, bulker, agent); err != nil { + zlog.Info(). + Str("hash.sha256", pp.Default.Role.Sha2). + Str(LogDefaultOutputApiKeyId, defaultOutputApiKey.Id). 
+ Msg("Updating agent record to pick up default output key.") + + fields := map[string]interface{}{ + dl.FieldDefaultApiKey: defaultOutputApiKey.Agent(), + dl.FieldDefaultApiKeyId: defaultOutputApiKey.Id, + dl.FieldPolicyOutputPermissionsHash: pp.Default.Role.Sha2, + } + + body, err := json.Marshal(map[string]interface{}{ + "doc": fields, + }) + if err != nil { return nil, err } - } - if ok := setMapObj(actionObj, agent.DefaultApiKey, "outputs", "default", "api_key"); !ok { - log.Debug().Msg("Cannot inject api_key into policy") + if err = bulker.Update(ctx, dl.FleetAgents, agent.Id, body); err != nil { + zlog.Error().Err(err).Msg("fail update agent record") + return nil, err + } + agent.DefaultApiKey = defaultOutputApiKey.Agent() } - dataJSON, err := json.Marshal(struct { - Policy map[string]interface{} `json:"policy"` - }{actionObj}) + rewrittenPolicy, err := rewritePolicy(pp, agent.DefaultApiKey) if err != nil { + zlog.Error().Err(err).Msg("fail rewrite policy") return nil, err } - r := policy.RevisionFromPolicy(p) + r := policy.RevisionFromPolicy(pp.Policy) resp := ActionResp{ AgentId: agent.Id, - CreatedAt: p.Timestamp, - Data: dataJSON, + CreatedAt: pp.Policy.Timestamp, + Data: rewrittenPolicy, Id: r.String(), Type: TypePolicyChange, } @@ -314,6 +509,45 @@ func parsePolicy(ctx context.Context, bulker bulk.Bulk, agentId string, p model. return &resp, nil } +// Return Serializable policy injecting the apikey into the output field. +// This avoids reallocation of each section of the policy by duping +// the map object and only replacing the targeted section. 
+func rewritePolicy(pp *policy.ParsedPolicy, apiKey string) (interface{}, error) { + + // Parse the outputs maps in order to inject the api key + const outputsProperty = "outputs" + outputs, err := smap.Parse(pp.Fields[outputsProperty]) + if err != nil { + return nil, err + } + + if outputs == nil { + return nil, ErrNoPolicyOutput + } + + if ok := setMapObj(outputs, apiKey, pp.Default.Name, "api_key"); !ok { + return nil, ErrFailInjectApiKey + } + + outputRaw, err := json.Marshal(outputs) + if err != nil { + return nil, err + } + + // Dupe field map; pp is immutable + fields := make(map[string]json.RawMessage, len(pp.Fields)) + + for k, v := range pp.Fields { + fields[k] = v + } + + fields[outputsProperty] = json.RawMessage(outputRaw) + + return struct { + Policy map[string]json.RawMessage `json:"policy"` + }{fields}, nil +} + func setMapObj(obj map[string]interface{}, val interface{}, keys ...string) bool { if len(keys) == 0 { return false @@ -339,38 +573,98 @@ func setMapObj(obj map[string]interface{}, val interface{}, keys ...string) bool func findAgentByApiKeyId(ctx context.Context, bulker bulk.Bulk, id string) (*model.Agent, error) { agent, err := dl.FindAgent(ctx, bulker, dl.QueryAgentByAssessAPIKeyID, dl.FieldAccessAPIKeyID, id) - if err != nil && errors.Is(err, dl.ErrNotFound) { - err = ErrAgentNotFound + if err != nil { + if errors.Is(err, dl.ErrNotFound) { + err = ErrAgentNotFound + } else { + err = errors.Wrap(err, "findAgentByApiKeyId") + } } return &agent, err } // parseMeta compares the agent and the request local_metadata content // and returns fields to update the agent record or nil -func parseMeta(agent *model.Agent, req *CheckinRequest) (fields saved.Fields, err error) { - // Quick comparison first +func parseMeta(zlog zerolog.Logger, agent *model.Agent, req *CheckinRequest) ([]byte, error) { + + // Quick comparison first; compare the JSON payloads. + // If the data is not consistently normalized, this short-circuit will not work. 
 if bytes.Equal(req.LocalMeta, agent.LocalMetadata) {
-		log.Trace().Msg("Quick comparing local metadata is equal")
+		zlog.Trace().Msg("quick comparing local metadata is equal")
 		return nil, nil
 	}
 
-	// Compare local_metadata content and update if different
-	var reqLocalMeta saved.Fields
-	var agentLocalMeta saved.Fields
-	err = json.Unmarshal(req.LocalMeta, &reqLocalMeta)
-	if err != nil {
-		return nil, err
+	// Deserialize the request metadata
+	var reqLocalMeta interface{}
+	if err := json.Unmarshal(req.LocalMeta, &reqLocalMeta); err != nil {
+		return nil, errors.Wrap(err, "parseMeta request")
 	}
-	err = json.Unmarshal(agent.LocalMetadata, &agentLocalMeta)
-	if err != nil {
-		return nil, err
+
+	// If empty, don't step on existing data
+	if reqLocalMeta == nil {
+		return nil, nil
+	}
+
+	// Deserialize the agent's metadata copy
+	var agentLocalMeta interface{}
+	if err := json.Unmarshal(agent.LocalMetadata, &agentLocalMeta); err != nil {
+		return nil, errors.Wrap(err, "parseMeta local")
 	}
 
-	if reqLocalMeta != nil && !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) {
-		log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("Applying new local metadata")
-		fields = map[string]interface{}{
-			FieldLocalMetadata: req.LocalMeta,
+	var outMeta []byte
+
+	// Compare the deserialized meta structures and return the bytes to update if different
+	if !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) {
+
+		zlog.Trace().
+			RawJSON("oldLocalMeta", agent.LocalMetadata).
+			RawJSON("newLocalMeta", req.LocalMeta).
+			Msg("local metadata not equal")
+
+		zlog.Info().
+			RawJSON("req.LocalMeta", req.LocalMeta).
+			Msg("applying new local metadata")
+
+		outMeta = req.LocalMeta
+	}
+
+	return outMeta, nil
+}
+
+func calcPollDuration(zlog zerolog.Logger, cfg *config.Server, setupDuration time.Duration) (time.Duration, time.Duration) {
+
+	pollDuration := cfg.Timeouts.CheckinLongPoll
+
+	// Under heavy load, elastic may take a long time to authorize the api key, many seconds to minutes.
+ // Short circuit the long poll to take the setup delay into account. This is particularly necessary + // in cloud where the proxy will time us out after 5m20s causing unnecessary errors. + + if setupDuration >= pollDuration { + // We took so long to setup that we need to exit immediately + pollDuration = 0 + zlog.Warn(). + Dur("setupDuration", setupDuration). + Dur("pollDuration", cfg.Timeouts.CheckinLongPoll). + Msg("excessive setup duration short cicuit long poll") + + } else { + pollDuration -= setupDuration + if setupDuration > (time.Second * 10) { + zlog.Warn(). + Dur("setupDuration", setupDuration). + Dur("pollDuration", pollDuration). + Msg("checking poll duration decreased due to slow setup") } } - return fields, nil + + var jitter time.Duration + if cfg.Timeouts.CheckinJitter != 0 { + jitter = time.Duration(rand.Int63n(int64(cfg.Timeouts.CheckinJitter))) + if jitter < pollDuration { + pollDuration = pollDuration - jitter + zlog.Trace().Dur("poll", pollDuration).Msg("Long poll with jitter") + } + } + + return pollDuration, jitter } diff --git a/cmd/fleet/handleChecking_test.go b/cmd/fleet/handleChecking_test.go new file mode 100644 index 000000000..ea199c81a --- /dev/null +++ b/cmd/fleet/handleChecking_test.go @@ -0,0 +1,35 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "encoding/json" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConvertActionsEmpty(t *testing.T) { + resp, token := convertActions("1234", nil) + assert.Equal(t, resp, []ActionResp{}) + assert.Equal(t, token, "") +} + +func TestConvertActions(t *testing.T) { + actions := []model.Action{ + { + ActionId: "1234", + }, + } + resp, token := convertActions("agent-id", actions) + assert.Equal(t, resp, []ActionResp{ + { + AgentId: "agent-id", + Id: "1234", + Data: json.RawMessage(nil), + }, + }) + assert.Equal(t, token, "") +} diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 8117909be..de1f922cc 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -7,7 +7,6 @@ package fleet import ( "context" "encoding/json" - "errors" "fmt" "io" "net/http" @@ -18,43 +17,53 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" "github.com/gofrs/uuid" + "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" + "github.com/miolini/datacounter" + "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "golang.org/x/sync/semaphore" ) const ( kEnrollMod = "enroll" - kCacheAccessInitTTL = time.Second * 30 // Cache a bit longer to handle expensive initial checkin - kCacheEnrollmentTTL = time.Second * 30 + EnrollEphemeral = "EPHEMERAL" + EnrollPermanent = "PERMANENT" + EnrollTemporary = "TEMPORARY" ) var ( - ErrUnknownEnrollType = errors.New("unknown enroll request 
type") - ErrServiceBusy = errors.New("service busy") - ErrAgentIdFailure = errors.New("agent persist failure") + ErrUnknownEnrollType = errors.New("unknown enroll request type") + ErrInactiveEnrollmentKey = errors.New("inactive enrollment key") ) type EnrollerT struct { - throttle *semaphore.Weighted - bulker bulk.Bulk - cache cache.Cache + verCon version.Constraints + cfg *config.Server + bulker bulk.Bulk + cache cache.Cache + limit *limit.Limiter } -func NewEnrollerT(cfg *config.Server, bulker bulk.Bulk, c cache.Cache) (*EnrollerT, error) { - // This value has more to do with the throughput of elastic search than anything else - // if you have a large elastic search cluster, you can be more aggressive. - maxEnrollPending := cfg.MaxEnrollPending +func NewEnrollerT(verCon version.Constraints, cfg *config.Server, bulker bulk.Bulk, c cache.Cache) (*EnrollerT, error) { + + log.Info(). + Interface("limits", cfg.Limits.EnrollLimit). + Msg("Setting config enroll_limit") return &EnrollerT{ - throttle: semaphore.NewWeighted(maxEnrollPending), - bulker: bulker, - cache: c, + verCon: verCon, + cfg: cfg, + limit: limit.NewLimiter(&cfg.Limits.EnrollLimit), + bulker: bulker, + cache: c, }, nil } @@ -68,91 +77,102 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou return } - data, err := rt.et.handleEnroll(r) + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). + Str(EcsHttpRequestId, reqId). + Str("mod", kEnrollMod). + Logger() + + resp, err := rt.et.handleEnroll(&zlog, w, r) if err != nil { - code := http.StatusBadRequest - if err == ErrServiceBusy { - code = http.StatusServiceUnavailable - } + cntEnroll.IncError(err) + resp := NewErrorResp(err) - // Don't log connection drops - if err != context.Canceled { - log.Error(). - Str("mod", kEnrollMod). - Int("code", code). - Err(err).Dur("tdiff", time.Since(start)). - Msg("Enroll fail") - } + zlog.WithLevel(resp.Level). + Err(err). + Int(EcsHttpResponseCode, resp.StatusCode). 
+			Int64(EcsEventDuration, time.Since(start).Nanoseconds()).
+			Msg("fail enroll")
-		// Don't log connection drops
-		if err != context.Canceled {
-			log.Error().
-				Str("mod", kEnrollMod).
-				Int("code", code).
-				Err(err).Dur("tdiff", time.Since(start)).
-				Msg("Enroll fail")
-		}
+		if err := resp.Write(w); err != nil {
+			zlog.Error().Err(err).Msg("fail writing error response")
+		}
 
-		http.Error(w, err.Error(), code)
 		return
 	}
 
-	if _, err = w.Write(data); err != nil {
-		log.Error().Err(err).Msg("Fail send enroll response")
-	}
-
-	log.Trace().
-		Err(err).
-		RawJSON("raw", data).
-		Str("mod", kEnrollMod).
-		Dur("rtt", time.Since(start)).
-		Msg("handleEnroll OK")
-}
-
-func (et *EnrollerT) acquireSemaphore(ctx context.Context) error {
-	start := time.Now()
+	if err = writeResponse(zlog, w, resp, start); err != nil {
+		cntEnroll.IncError(err)
+		zlog.Error().
+			Err(err).
+			Int64(EcsEventDuration, time.Since(start).Nanoseconds()).
+			Msg("fail write response")
 
-	// Wait a reasonable amount of time, but if busy for N seconds; ask to come back later.
-	acquireCtx, cancelF := context.WithTimeout(ctx, time.Second*10)
-	defer cancelF()
-
-	if err := et.throttle.Acquire(acquireCtx, 1); err != nil {
-		return ErrServiceBusy
+		// Remove ghost artifacts; agent will never receive the payload
+		rt.et.wipeGhosts(r.Context(), zlog, resp)
 	}
-
-	log.Trace().
-		Str("mod", kEnrollMod).
-		Dur("tdiff", time.Since(start)).
- Msg("Enroll acquire") - - return nil } -func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { +func (et *EnrollerT) handleEnroll(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request) (*EnrollResponse, error) { - if err := et.acquireSemaphore(r.Context()); err != nil { + limitF, err := et.limit.Acquire() + if err != nil { return nil, err } + defer limitF() - defer et.throttle.Release(1) - - key, err := authApiKey(r, et.bulker.Client(), et.cache) + key, err := authApiKey(r, et.bulker, et.cache) if err != nil { return nil, err } - erec, err := et.fetchEnrollmentKeyRecord(r.Context(), key.Id) + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogEnrollApiKeyId, key.Id) + }) + + ver, err := validateUserAgent(*zlog, r, et.verCon) if err != nil { return nil, err } - // Parse the request body - req, err := decodeEnrollRequest(r.Body) + // Metrics; serenity now. + dfunc := cntEnroll.IncStart() + defer dfunc() + + return et.processRequest(*zlog, w, r, key.Id, ver) +} + +func (et *EnrollerT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, enrollmentApiKeyId, ver string) (*EnrollResponse, error) { + + // Validate that an enrollment record exists for a key with this id. 
+ erec, err := et.fetchEnrollmentKeyRecord(r.Context(), enrollmentApiKeyId) if err != nil { return nil, err } - resp, err := _enroll(r.Context(), et.bulker, et.cache, *req, *erec) + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if et.cfg.Limits.EnrollLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, et.cfg.Limits.EnrollLimit.MaxBody) + } + + readCounter := datacounter.NewReaderCounter(body) + + // Parse the request body + req, err := decodeEnrollRequest(readCounter) if err != nil { return nil, err } - return json.Marshal(resp) + cntEnroll.bodyIn.Add(readCounter.Count()) + + return et._enroll(r.Context(), zlog, req, erec.PolicyId, ver) } -func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollRequest, erec model.EnrollmentApiKey) (*EnrollResponse, error) { +func (et *EnrollerT) _enroll(ctx context.Context, zlog zerolog.Logger, req *EnrollRequest, policyId, ver string) (*EnrollResponse, error) { if req.SharedId != "" { // TODO: Support pre-existing install @@ -160,7 +180,6 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } now := time.Now() - nowStr := now.UTC().Format(time.RFC3339) // Generate an ID here so we can pre-create the api key and avoid a round trip u, err := uuid.NewV4() @@ -168,49 +187,37 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq return nil, err } - // TODO: Cleanup after ourselves on failure: - // Revoke generated keys. - // Remove agent record. - agentId := u.String() - accessApiKey, err := generateAccessApiKey(ctx, bulker.Client(), agentId) - if err != nil { - return nil, err - } - - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agentId, "default") + // Update the local metadata agent id + localMeta, err := updateLocalMetaAgentId(req.Meta.Local, agentId) if err != nil { return nil, err } - log.Debug(). - Dur("rtt", time.Since(now)). - Str("agentId", agentId). 
- Str("accessApiKey.Id", accessApiKey.Id). - Str("defaultOutputApiKey.Id", defaultOutputApiKey.Id). - Msg("Created api key") - - // Update the local metadata agent id - localMeta, err := updateLocalMetaAgentId(req.Meta.Local, agentId) + // Generate the Fleet Agent access api key + accessApiKey, err := generateAccessApiKey(ctx, et.bulker, agentId) if err != nil { return nil, err } agentData := model.Agent{ - Active: true, - PolicyId: erec.PolicyId, - Type: req.Type, - EnrolledAt: nowStr, - LocalMetadata: localMeta, - AccessApiKeyId: accessApiKey.Id, - DefaultApiKeyId: defaultOutputApiKey.Id, - DefaultApiKey: defaultOutputApiKey.Agent(), - ActionSeqNo: dl.UndefinedSeqNo, + Active: true, + PolicyId: policyId, + Type: req.Type, + EnrolledAt: now.UTC().Format(time.RFC3339), + LocalMetadata: localMeta, + AccessApiKeyId: accessApiKey.Id, + ActionSeqNo: []int64{sqn.UndefinedSeqNo}, + Agent: &model.AgentMetadata{ + Id: agentId, + Version: ver, + }, } - err = createFleetAgent(ctx, bulker, agentId, agentData) + err = createFleetAgent(ctx, et.bulker, agentId, agentData) if err != nil { + invalidateApiKey(ctx, zlog, et.bulker, accessApiKey.Id) return nil, err } @@ -231,11 +238,96 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } // We are Kool & and the Gang; cache the access key to avoid the roundtrip on impending checkin - c.SetApiKey(*accessApiKey, kCacheAccessInitTTL) + et.cache.SetApiKey(*accessApiKey, true) return &resp, nil } +// Remove the ghost artifacts from Elastic; the agent record and the accessApiKey. 
+func (et *EnrollerT) wipeGhosts(ctx context.Context, zlog zerolog.Logger, resp *EnrollResponse) { + zlog = zlog.With().Str(LogAgentId, resp.Item.ID).Logger() + + if err := et.bulker.Delete(ctx, dl.FleetAgents, resp.Item.ID); err != nil { + zlog.Error().Err(err).Msg("ghost agent record failed to delete") + } else { + zlog.Info().Msg("ghost agent record deleted") + } + + invalidateApiKey(ctx, zlog, et.bulker, resp.Item.AccessApiKeyId) +} + +func invalidateApiKey(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, apikeyId string) error { + + // hack-a-rama: We purposely do not force a "refresh:true" on the Apikey creation + // because doing so causes the api call to slow down at scale. It is already very slow. + // So we have to wait for the key to become visible until we can invalidate it. + + zlog = zlog.With().Str(LogApiKeyId, apikeyId).Logger() + + start := time.Now() + +LOOP: + for { + + _, err := bulker.ApiKeyRead(ctx, apikeyId) + + switch { + case err == nil: + break LOOP + case !errors.Is(err, apikey.ErrApiKeyNotFound): + zlog.Error().Err(err).Msg("Fail ApiKeyRead") + return err + case time.Since(start) > time.Minute: + err := errors.New("Apikey index failed to refresh") + zlog.Error().Err(err).Msg("Abort query attempt on apikey") + return err + } + + select { + case <-ctx.Done(): + zlog.Error(). + Err(ctx.Err()). + Str("apikeyId", apikeyId). 
+ Msg("Failed to invalidate apiKey on ctx done during hack sleep") + return ctx.Err() + case <-time.After(time.Second): + } + } + + if err := bulker.ApiKeyInvalidate(ctx, apikeyId); err != nil { + zlog.Error().Err(err).Msg("fail invalidate apiKey") + return err + } + + zlog.Info().Dur("dur", time.Since(start)).Msg("invalidated apiKey") + return nil +} + +func writeResponse(zlog zerolog.Logger, w http.ResponseWriter, resp *EnrollResponse, start time.Time) error { + + data, err := json.Marshal(resp) + if err != nil { + return errors.Wrap(err, "marshal enrollResponse") + } + + numWritten, err := w.Write(data) + cntEnroll.bodyOut.Add(uint64(numWritten)) + + if err != nil { + return errors.Wrap(err, "fail send enroll response") + } + + zlog.Info(). + Str(LogAgentId, resp.Item.ID). + Str(LogPolicyId, resp.Item.PolicyId). + Str(LogAccessApiKeyId, resp.Item.AccessApiKeyId). + Int(EcsHttpResponseBodyBytes, numWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("Elastic Agent successfully enrolled") + + return nil +} + // updateMetaLocalAgentId updates the agent id in the local metadata if exists // At the time of writing the local metadata blob looks something like this // { @@ -305,13 +397,25 @@ func createFleetAgent(ctx context.Context, bulker bulk.Bulk, id string, agent mo return nil } -func generateAccessApiKey(ctx context.Context, client *elasticsearch.Client, agentId string) (*apikey.ApiKey, error) { - return apikey.Create(ctx, client, agentId, "", []byte(kFleetAccessRolesJSON)) +func generateAccessApiKey(ctx context.Context, bulk bulk.Bulk, agentId string) (*apikey.ApiKey, error) { + return bulk.ApiKeyCreate( + ctx, + agentId, + "", + []byte(kFleetAccessRolesJSON), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) } -func generateOutputApiKey(ctx context.Context, client *elasticsearch.Client, agentId string, outputName string) (*apikey.ApiKey, error) { +func generateOutputApiKey(ctx context.Context, bulk bulk.Bulk, agentId, outputName 
string, roles []byte) (*apikey.ApiKey, error) { name := fmt.Sprintf("%s:%s", agentId, outputName) - return apikey.Create(ctx, client, name, "", []byte(kFleetOutputRolesJSON)) + return bulk.ApiKeyCreate( + ctx, + name, + "", + roles, + apikey.NewMetadata(agentId, apikey.TypeOutput), + ) } func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (*model.EnrollmentApiKey, error) { @@ -321,33 +425,32 @@ func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (* } // Pull API key record from .fleet-enrollment-api-keys - rec, err := dl.FindEnrollmentAPIKey(ctx, et.bulker, dl.QueryEnrollmentAPIKeyByID, id) + rec, err := dl.FindEnrollmentAPIKey(ctx, et.bulker, dl.QueryEnrollmentAPIKeyByID, dl.FieldApiKeyID, id) if err != nil { - return nil, err + return nil, errors.Wrap(err, "FindEnrollmentAPIKey") } if !rec.Active { - return nil, fmt.Errorf("record is inactive") + return nil, ErrInactiveEnrollmentKey } cost := int64(len(rec.ApiKey)) - et.cache.SetEnrollmentApiKey(id, rec, cost, kCacheEnrollmentTTL) + et.cache.SetEnrollmentApiKey(id, rec, cost) return &rec, nil } func decodeEnrollRequest(data io.Reader) (*EnrollRequest, error) { - // TODO: defend overflow, slow roll var req EnrollRequest decoder := json.NewDecoder(data) if err := decoder.Decode(&req); err != nil { - return nil, err + return nil, errors.Wrap(err, "decode enroll request") } // Validate switch req.Type { - case "EPHEMERAL", "PERMANENT", "TEMPORARY": + case EnrollEphemeral, EnrollPermanent, EnrollTemporary: default: return nil, ErrUnknownEnrollType } diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go new file mode 100644 index 000000000..ba809da6f --- /dev/null +++ b/cmd/fleet/handleStatus.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog/log" +) + +func (rt Router) handleStatus(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + + dfunc := cntStatus.IncStart() + defer dfunc() + + status := rt.sm.Status() + resp := StatusResponse{ + Name: kServiceName, + Status: status.String(), + } + + reqId := r.Header.Get(logger.HeaderRequestID) + + data, err := json.Marshal(&resp) + if err != nil { + code := http.StatusInternalServerError + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Int(EcsHttpResponseCode, code).Msg("fail status") + http.Error(w, "", code) + return + } + + code := http.StatusServiceUnavailable + if status == proto.StateObserved_DEGRADED || status == proto.StateObserved_HEALTHY { + code = http.StatusOK + } + w.WriteHeader(code) + + var nWritten int + if nWritten, err = w.Write(data); err != nil { + if err != context.Canceled { + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Int(EcsHttpResponseCode, code).Msg("fail status") + } + } + + cntStatus.bodyOut.Add(uint64(nWritten)) +} diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 2c0e1abd2..3a695e191 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -6,102 +6,415 @@ package fleet import ( "context" + "fmt" + "io" + "os" + "reflect" + "runtime/debug" + "sync" "time" + "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/yaml" + "github.com/elastic/fleet-server/v7/internal/pkg/action" + "github.com/elastic/fleet-server/v7/internal/pkg/build" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" 
"github.com/elastic/fleet-server/v7/internal/pkg/coordinator" "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/env" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/logger" - "github.com/elastic/fleet-server/v7/internal/pkg/migrate" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/profile" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" + "github.com/elastic/fleet-server/v7/internal/pkg/reload" "github.com/elastic/fleet-server/v7/internal/pkg/signal" - + "github.com/elastic/fleet-server/v7/internal/pkg/sleep" + "github.com/elastic/fleet-server/v7/internal/pkg/status" + "github.com/elastic/fleet-server/v7/internal/pkg/ver" + + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/hashicorp/go-version" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) -const kPolicyThrottle = time.Millisecond * 5 +const ( + kServiceName = "fleet-server" + kAgentMode = "agent-mode" + kAgentModeRestartLoopDelay = 2 * time.Second -func checkErr(err error) { - if err != nil && err != context.Canceled { - panic(err) - } -} - -func savedObjectKey() string { - key := env.GetStr( - "ES_SAVED_KEY", - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - ) - log.Debug().Str("key", key).Msg("saved objects") - return key -} + kUAFleetServer = "Fleet-Server" +) func installSignalHandler() context.Context { rootCtx := context.Background() return signal.HandleInterrupt(rootCtx) } -func getRunCommand(version string) func(cmd *cobra.Command, args []string) error { +func makeCache(cfg *config.Config) 
(cache.Cache, error) { + cacheCfg := makeCacheConfig(cfg) + log.Info().Interface("cfg", cacheCfg).Msg("Setting cache config options") + return cache.New(cacheCfg) +} + +func makeCacheConfig(cfg *config.Config) cache.Config { + ccfg := cfg.Inputs[0].Cache + + return cache.Config{ + NumCounters: ccfg.NumCounters, + MaxCost: ccfg.MaxCost, + ActionTTL: ccfg.ActionTTL, + EnrollKeyTTL: ccfg.EnrollKeyTTL, + ArtifactTTL: ccfg.ArtifactTTL, + ApiKeyTTL: ccfg.ApiKeyTTL, + ApiKeyJitter: ccfg.ApiKeyJitter, + } +} + +func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, error) { + l, err := logger.Init(cfg, kServiceName) + if err != nil { + return nil, err + } + + log.Info(). + Str("version", version). + Str("commit", commit). + Int("pid", os.Getpid()). + Int("ppid", os.Getppid()). + Str("exe", os.Args[0]). + Strs("args", os.Args[1:]). + Msg("Boot fleet-server") + log.Debug().Strs("env", os.Environ()).Msg("environment") + + return l, err +} + +func getRunCommand(bi build.Info) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { + cfgObject := cmd.Flags().Lookup("E").Value.(*config.Flag) + cliCfg := cfgObject.Config() - cfgPath, err := cmd.Flags().GetString("config") - if err != nil { - return err - } - cfg, err := config.LoadFile(cfgPath) + agentMode, err := cmd.Flags().GetBool(kAgentMode) if err != nil { return err } - logger.Init(cfg) + var l *logger.Logger + var runErr error + if agentMode { + cfg, err := config.FromConfig(cliCfg) + if err != nil { + return err + } + l, err = initLogger(cfg, bi.Version, bi.Commit) + if err != nil { + return err + } - ctx := installSignalHandler() - c, err := cache.New() - checkErr(err) + agent, err := NewAgentMode(cliCfg, os.Stdin, bi, l) + if err != nil { + return err + } - srv, err := NewFleetServer(cfg, c, version) - checkErr(err) + runErr = agent.Run(installSignalHandler()) + } else { + cfgPath, err := cmd.Flags().GetString("config") + if err != nil { + return err + 
} + cfgData, err := yaml.NewConfigWithFile(cfgPath, config.DefaultOptions...) + if err != nil { + return err + } + err = cfgData.Merge(cliCfg, config.DefaultOptions...) + if err != nil { + return err + } + cfg, err := config.FromConfig(cfgData) + if err != nil { + return err + } - return srv.Run(ctx) + l, err = initLogger(cfg, bi.Version, bi.Commit) + if err != nil { + return err + } + + srv, err := NewFleetServer(cfg, bi, status.NewLog()) + if err != nil { + return err + } + + runErr = srv.Run(installSignalHandler()) + } + + if runErr != nil && runErr != context.Canceled { + log.Error().Err(runErr).Msg("Exiting") + l.Sync() + return runErr + } + l.Sync() + return nil } } -func NewCommand(version string) *cobra.Command { +func NewCommand(bi build.Info) *cobra.Command { cmd := &cobra.Command{ - Use: "fleet-server", + Use: kServiceName, Short: "Fleet Server controls a fleet of Elastic Agents", - RunE: getRunCommand(version), + RunE: getRunCommand(bi), } cmd.Flags().StringP("config", "c", "fleet-server.yml", "Configuration for Fleet Server") + cmd.Flags().Bool(kAgentMode, false, "Running under execution of the Elastic Agent") + cmd.Flags().VarP(config.NewFlag(), "E", "E", "Overwrite configuration value") return cmd } -type FleetServer struct { - version string +type firstCfg struct { + cfg *config.Config + err error +} + +type AgentMode struct { + cliCfg *ucfg.Config + bi build.Info + reloadables []reload.Reloadable + + agent client.Client + + mux sync.Mutex + firstCfg chan firstCfg + srv *FleetServer + srvCtx context.Context + srvCanceller context.CancelFunc + startChan chan struct{} +} + +func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, bi build.Info, reloadables ...reload.Reloadable) (*AgentMode, error) { + var err error + + a := &AgentMode{ + cliCfg: cliCfg, + bi: bi, + reloadables: reloadables, + } + a.agent, err = client.NewFromReader(reader, a) + if err != nil { + return nil, err + } + return a, nil +} + +func (a *AgentMode) Run(ctx context.Context) 
error { + ctx, canceller := context.WithCancel(ctx) + defer canceller() + + a.firstCfg = make(chan firstCfg) + a.startChan = make(chan struct{}, 1) + log.Info().Msg("starting communication connection back to Elastic Agent") + err := a.agent.Start(ctx) + if err != nil { + return err + } + + // wait for the initial configuration to be sent from the + // Elastic Agent before starting the actual Fleet Server. + log.Info().Msg("waiting for Elastic Agent to send initial configuration") + var cfg firstCfg + select { + case <-ctx.Done(): + return fmt.Errorf("never received initial configuration") + case cfg = <-a.firstCfg: + } - cfg *config.Config - cfgCh chan *config.Config - cache cache.Cache + // possible that first configuration resulted in an error + if cfg.err != nil { + // unblock startChan even though there was an error + a.startChan <- struct{}{} + return cfg.err + } + + // start fleet server with the initial configuration and its + // own context (needed so when OnStop occurs the fleet server + // is stopped and not the elastic-agent-client as well) + srvCtx, srvCancel := context.WithCancel(ctx) + defer srvCancel() + log.Info().Msg("received initial configuration starting Fleet Server") + srv, err := NewFleetServer(cfg.cfg, a.bi, status.NewChained(status.NewLog(), a.agent)) + if err != nil { + // unblock startChan even though there was an error + a.startChan <- struct{}{} + return err + } + a.mux.Lock() + close(a.firstCfg) + a.firstCfg = nil + a.srv = srv + a.srvCtx = srvCtx + a.srvCanceller = srvCancel + a.mux.Unlock() + + // trigger startChan so OnConfig can continue + a.startChan <- struct{}{} + + // keep trying to restart the FleetServer on failure, reporting + // the status back to Elastic Agent + res := make(chan error) + go func() { + for { + err := a.srv.Run(srvCtx) + if err == nil || err == context.Canceled { + res <- err + return + } + // sleep some before calling Run again + sleep.WithContext(srvCtx, kAgentModeRestartLoopDelay) + } + }() + return <-res 
+} + +func (a *AgentMode) OnConfig(s string) { + a.mux.Lock() + cliCfg := ucfg.MustNewFrom(a.cliCfg, config.DefaultOptions...) + srv := a.srv + ctx := a.srvCtx + canceller := a.srvCanceller + cfgChan := a.firstCfg + startChan := a.startChan + a.mux.Unlock() + + var cfg *config.Config + var err error + defer func() { + if err != nil { + if cfgChan != nil { + // failure on first config + cfgChan <- firstCfg{ + cfg: nil, + err: err, + } + // block until startChan signalled + <-startChan + return + } + + log.Err(err).Msg("failed to reload configuration") + if canceller != nil { + canceller() + } + } + }() + + // load configuration and then merge it on top of the CLI configuration + var cfgData *ucfg.Config + cfgData, err = yaml.NewConfig([]byte(s), config.DefaultOptions...) + if err != nil { + return + } + err = cliCfg.Merge(cfgData, config.DefaultOptions...) + if err != nil { + return + } + cfg, err = config.FromConfig(cliCfg) + if err != nil { + return + } + + if cfgChan != nil { + // reload the generic reloadables + for _, r := range a.reloadables { + err = r.Reload(ctx, cfg) + if err != nil { + return + } + } + + // send starting configuration so Fleet Server can start + cfgChan <- firstCfg{ + cfg: cfg, + err: nil, + } + + // block handling more OnConfig calls until the Fleet Server + // has been fully started + <-startChan + } else if srv != nil { + // reload the generic reloadables + for _, r := range a.reloadables { + err = r.Reload(ctx, cfg) + if err != nil { + return + } + } + + // reload the server + err = srv.Reload(ctx, cfg) + if err != nil { + return + } + } else { + err = fmt.Errorf("internal service should have been started") + return + } +} + +func (a *AgentMode) OnStop() { + a.mux.Lock() + canceller := a.srvCanceller + a.mux.Unlock() + + if canceller != nil { + canceller() + } +} + +func (a *AgentMode) OnError(err error) { + // Log communication error through the logger. These errors are only + // provided for logging purposes. 
The elastic-agent-client handles + // retries and reconnects internally automatically. + log.Err(err) +} + +type FleetServer struct { + bi build.Info + verCon version.Constraints + policyId string + + cfg *config.Config + cfgCh chan *config.Config + cache cache.Cache + reporter status.Reporter } // NewFleetServer creates the actual fleet server service. -func NewFleetServer(cfg *config.Config, c cache.Cache, version string) (*FleetServer, error) { +func NewFleetServer(cfg *config.Config, bi build.Info, reporter status.Reporter) (*FleetServer, error) { + verCon, err := buildVersionConstraint(bi.Version) + if err != nil { + return nil, err + } + + cache, err := makeCache(cfg) + if err != nil { + return nil, err + } + return &FleetServer{ - version: version, - cfg: cfg, - cfgCh: make(chan *config.Config, 1), - cache: c, + bi: bi, + verCon: verCon, + cfg: cfg, + cfgCh: make(chan *config.Config, 1), + cache: cache, + reporter: reporter, }, nil } @@ -146,101 +459,356 @@ func (f *FleetServer) Run(ctx context.Context) error { proEg, srvEg *errgroup.Group ) + started := false + +LOOP: for { ech := make(chan error, 2) - // Restart profiler - if curCfg == nil || curCfg.Inputs[0].Server.Profile.Bind != newCfg.Inputs[0].Server.Profile.Bind { - stop(proCancel, proEg) - proEg, proCancel = start(ctx, func(ctx context.Context) error { - return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profile.Bind) - }, ech) + if started { + f.reporter.Status(proto.StateObserved_CONFIGURING, "Re-configuring", nil) + } else { + started = true + f.reporter.Status(proto.StateObserved_STARTING, "Starting", nil) + } + + // Create or recreate cache + if configCacheChanged(curCfg, newCfg) { + log.Info().Msg("reconfigure cache on configuration change") + cacheCfg := makeCacheConfig(newCfg) + err := f.cache.Reconfigure(cacheCfg) + log.Info().Err(err).Interface("cfg", cacheCfg).Msg("reconfigure cache complete") + if err != nil { + return err + } + } + + // Start or restart profiler + if 
configChangedProfiler(curCfg, newCfg) { + if proCancel != nil { + log.Info().Msg("stopping profiler on configuration change") + stop(proCancel, proEg) + } + proEg, proCancel = nil, nil + if newCfg.Inputs[0].Server.Profiler.Enabled { + log.Info().Msg("starting profiler on configuration change") + proEg, proCancel = start(ctx, func(ctx context.Context) error { + return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profiler.Bind) + }, ech) + } } - // Restart server - if curCfg == nil || curCfg.Inputs[0].Server != newCfg.Inputs[0].Server { - stop(srvCancel, srvEg) + // Start or restart server + if configChangedServer(curCfg, newCfg) { + if srvCancel != nil { + log.Info().Msg("stopping server on configuration change") + stop(srvCancel, srvEg) + } + log.Info().Msg("starting server on configuration change") srvEg, srvCancel = start(ctx, func(ctx context.Context) error { return f.runServer(ctx, newCfg) }, ech) } curCfg = newCfg + f.cfg = curCfg select { case newCfg = <-f.cfgCh: - log.Debug().Msg("Server configuration update") + log.Info().Msg("Server configuration update") case err := <-ech: + f.reporter.Status(proto.StateObserved_FAILED, fmt.Sprintf("Error - %s", err), nil) log.Error().Err(err).Msg("Fleet Server failed") - return nil + return err case <-ctx.Done(): - log.Info().Msg("Fleet Server exited") - return nil + f.reporter.Status(proto.StateObserved_STOPPING, "Stopping", nil) + break LOOP } } + + // Server is coming down; wait for the server group to exit cleanly. + // Timeout if something is locked up. 
+ err := safeWait(srvEg, time.Second) + + // Eat cancel error to minimize confusion in logs + if errors.Is(err, context.Canceled) { + err = nil + } + + log.Info().Err(err).Msg("Fleet Server exited") + return err +} + +func configChangedProfiler(curCfg, newCfg *config.Config) bool { + + changed := true + + switch { + case curCfg == nil: + case curCfg.Inputs[0].Server.Profiler.Enabled != newCfg.Inputs[0].Server.Profiler.Enabled: + case curCfg.Inputs[0].Server.Profiler.Bind != newCfg.Inputs[0].Server.Profiler.Bind: + default: + changed = false + } + + return changed +} + +func redactOutputCfg(cfg *config.Config) config.Output { + const kRedacted = "[redacted]" + redacted := cfg.Output + + if redacted.Elasticsearch.Password != "" { + redacted.Elasticsearch.Password = kRedacted + } + + if redacted.Elasticsearch.APIKey != "" { + redacted.Elasticsearch.APIKey = kRedacted + } + + if redacted.Elasticsearch.TLS != nil { + newTLS := *redacted.Elasticsearch.TLS + + if newTLS.Certificate.Key != "" { + newTLS.Certificate.Key = kRedacted + } + if newTLS.Certificate.Passphrase != "" { + newTLS.Certificate.Passphrase = kRedacted + } + + redacted.Elasticsearch.TLS = &newTLS + } + + return redacted +} + +func redactServerCfg(cfg *config.Config) config.Server { + const kRedacted = "[redacted]" + redacted := cfg.Inputs[0].Server + + if redacted.TLS != nil { + newTLS := *redacted.TLS + + if newTLS.Certificate.Key != "" { + newTLS.Certificate.Key = kRedacted + } + if newTLS.Certificate.Passphrase != "" { + newTLS.Certificate.Passphrase = kRedacted + } + + redacted.TLS = &newTLS + } + + return redacted +} + +func configChangedServer(curCfg, newCfg *config.Config) bool { + + zlog := log.With().Interface("new", redactServerCfg(newCfg)).Logger() + + changed := true + switch { + case curCfg == nil: + zlog.Info().Msg("initial server configuration") + case !reflect.DeepEqual(curCfg.Fleet, newCfg.Fleet): + zlog.Info(). + Interface("old", curCfg). 
+ Msg("fleet configuration has changed") + case !reflect.DeepEqual(curCfg.Output, newCfg.Output): + zlog.Info(). + Interface("old", redactOutputCfg(curCfg)). + Msg("output configuration has changed") + case !reflect.DeepEqual(curCfg.Inputs[0].Server, newCfg.Inputs[0].Server): + zlog.Info(). + Interface("old", redactServerCfg(curCfg)). + Msg("server configuration has changed") + default: + changed = false + } + + return changed +} + +func configCacheChanged(curCfg, newCfg *config.Config) bool { + if curCfg == nil { + return false + } + return curCfg.Inputs[0].Cache != newCfg.Inputs[0].Cache +} + +func safeWait(g *errgroup.Group, to time.Duration) (err error) { + waitCh := make(chan error) + go func() { + waitCh <- g.Wait() + }() + + select { + case err = <-waitCh: + case <-time.After(to): + log.Warn().Msg("deadlock: goroutine locked up on errgroup.Wait()") + err = errors.New("Group wait timeout") + } + + return } func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error { return func() error { + log.Debug().Msg(tag + " started") + err := runfn(ctx) - var ev *zerolog.Event - if err != nil { - log.Error().Err(err) + + lvl := zerolog.DebugLevel + switch { + case err == nil: + case errors.Is(err, context.Canceled): + err = nil + default: + lvl = zerolog.ErrorLevel } - ev = log.Debug() - ev.Msg(tag + " exited") + + log.WithLevel(lvl).Err(err).Msg(tag + " exited") return err } } +func initRuntime(cfg *config.Config) { + gcPercent := cfg.Inputs[0].Server.Runtime.GCPercent + if gcPercent != 0 { + old := debug.SetGCPercent(gcPercent) + + log.Info(). + Int("old", old). + Int("new", gcPercent). + Msg("SetGCPercent") + } +} + +func (f *FleetServer) initBulker(ctx context.Context, cfg *config.Config) (*bulk.Bulker, error) { + es, err := es.NewClient(ctx, cfg, false, es.WithUserAgent(kUAFleetServer, f.bi)) + if err != nil { + return nil, err + } + + blk := bulk.NewBulker(es, bulk.BulkOptsFromCfg(cfg)...) 
+ return blk, nil +} + func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err error) { - // Bulker is started in its own context and managed inside of this function. This is done so - // when the `ctx` is cancelled every worker using the bulker can get everything written on - // shutdown before the bulker is then cancelled. + initRuntime(cfg) + + // The metricsServer is only enabled if http.enabled is set in the config + metricsServer, err := f.initMetrics(ctx, cfg) + switch { + case err != nil: + return err + case metricsServer != nil: + defer metricsServer.Stop() + } + + // Bulker is started in its own context and managed in the scope of this function. This is done so + // when the `ctx` is cancelled, the bulker will remain executing until this function exits. + // This allows the child subsystems to continue to write to the data store while tearing down. bulkCtx, bulkCancel := context.WithCancel(context.Background()) defer bulkCancel() - es, bulker, err := bulk.InitES(bulkCtx, cfg) + + // Create the bulker subsystem + bulker, err := f.initBulker(bulkCtx, cfg) if err != nil { return err } - sv := saved.NewMgr(bulker, savedObjectKey()) - // Initial indices bootstrapping, needed for agents actions development - // TODO: remove this after the indices bootstrapping logic implemented in ES plugin - err = esboot.EnsureESIndices(ctx, es) - if err != nil { + // Execute the bulker engine in a goroutine with its orphaned context. + // Create an error channel for the case where the bulker exits + // unexpectedly (ie. not cancelled by the bulkCancel context). + errCh := make(chan error) + + go func() { + runFunc := loggedRunFunc(bulkCtx, "Bulker", bulker.Run) + + // Emit the error from bulker.Run to the local error channel. + // The error group will be listening for it. (see comments below) + errCh <- runFunc() + }() + + // Wrap context with an error group context to manage the lifecycle + // of the subsystems. 
An error from any subsystem, or if the + // parent context is cancelled, will cancel the group. + // see https://pkg.go.dev/golang.org/x/sync/errgroup#Group.Go + g, ctx := errgroup.WithContext(ctx) + + // Stub a function for inclusion in the errgroup that exits when + // the bulker exits. If the bulker exits before the error group, + // this will tear down the error group and g.Wait() will return. + // Otherwise it will be a noop. + g.Go(func() (err error) { + select { + case err = <-errCh: + case <-ctx.Done(): + err = ctx.Err() + } + return + }) + + if err = f.runSubsystems(ctx, cfg, g, bulker); err != nil { return err } - err = migrate.Migrate(ctx, sv, bulker) + + return g.Wait() +} + +func (f *FleetServer) runSubsystems(ctx context.Context, cfg *config.Config, g *errgroup.Group, bulker bulk.Bulk) (err error) { + esCli := bulker.Client() + + // Check version compatibility with Elasticsearch + err = ver.CheckCompatibility(ctx, esCli, f.bi.Version) if err != nil { - return err + return fmt.Errorf("failed version compatibility check with elasticsearch: %w", err) } - // Replacing to errgroup context - g, ctx := errgroup.WithContext(ctx) + // Run migrations; current safe to do in background. That may change in the future. 
+ g.Go(loggedRunFunc(ctx, "Migrations", func(ctx context.Context) error { + return dl.Migrate(ctx, bulker) + })) + + // Monitoring es client, longer timeout, no retries + monCli, err := es.NewClient(ctx, cfg, true, es.WithUserAgent(kUAFleetServer, f.bi)) + if err != nil { + return err + } // Coordinator policy monitor - pim, err := monitor.New(dl.FleetPolicies, es) + pim, err := monitor.New(dl.FleetPolicies, esCli, monCli, + monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize), + monitor.WithPollTimeout(cfg.Inputs[0].Monitor.PollTimeout), + ) if err != nil { return err } g.Go(loggedRunFunc(ctx, "Policy index monitor", pim.Run)) - cord := coordinator.NewMonitor(cfg.Fleet, f.version, bulker, pim, coordinator.NewCoordinatorZero) + cord := coordinator.NewMonitor(cfg.Fleet, f.bi.Version, bulker, pim, coordinator.NewCoordinatorZero) g.Go(loggedRunFunc(ctx, "Coordinator policy monitor", cord.Run)) // Policy monitor - pm := policy.NewMonitor(bulker, pim, kPolicyThrottle) + pm := policy.NewMonitor(bulker, pim, cfg.Inputs[0].Server.Limits.PolicyThrottle) g.Go(loggedRunFunc(ctx, "Policy monitor", pm.Run)) + // Policy self monitor + sm := policy.NewSelfMonitor(cfg.Fleet, bulker, pim, cfg.Inputs[0].Policy.ID, f.reporter) + g.Go(loggedRunFunc(ctx, "Policy self monitor", sm.Run)) + // Actions monitoring var am monitor.SimpleMonitor var ad *action.Dispatcher var tr *action.TokenResolver - // Behind the feature flag - am, err = monitor.NewSimple(dl.FleetActions, es, monitor.WithExpiration(true)) + am, err = monitor.NewSimple(dl.FleetActions, esCli, monCli, + monitor.WithExpiration(true), + monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize), + monitor.WithPollTimeout(cfg.Inputs[0].Monitor.PollTimeout), + ) if err != nil { return err } @@ -253,23 +821,25 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er return err } - bc := NewBulkCheckin(bulker) - g.Go(loggedRunFunc(ctx, "Bulk checkin", func(ctx context.Context) error { - return bc.Run(ctx, 
sv) - })) + bc := checkin.NewBulk(bulker) + g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) - ct := NewCheckinT(f.cfg, f.cache, bc, pm, am, ad, tr, bulker) - et, err := NewEnrollerT(&f.cfg.Inputs[0].Server, bulker, f.cache) + ct := NewCheckinT(f.verCon, &cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) + et, err := NewEnrollerT(f.verCon, &cfg.Inputs[0].Server, bulker, f.cache) if err != nil { return err } - router := NewRouter(bulker, ct, et) + + at := NewArtifactT(&cfg.Inputs[0].Server, bulker, f.cache) + ack := NewAckT(&cfg.Inputs[0].Server, bulker, f.cache) + + router := NewRouter(bulker, ct, et, at, ack, sm) g.Go(loggedRunFunc(ctx, "Http server", func(ctx context.Context) error { - return runServer(ctx, router, &f.cfg.Inputs[0].Server) + return runServer(ctx, router, &cfg.Inputs[0].Server) })) - return g.Wait() + return err } // Reload reloads the fleet server with the latest configuration. diff --git a/cmd/fleet/main_integration_test.go b/cmd/fleet/main_integration_test.go new file mode 100644 index 000000000..964d36016 --- /dev/null +++ b/cmd/fleet/main_integration_test.go @@ -0,0 +1,233 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build integration +// +build integration + +package fleet + +import ( + "context" + "fmt" + "io" + "sync" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/server" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/go-ucfg" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/fleet-server/v7/internal/pkg/build" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/suite" +) + +var biInfo = build.Info{ + Version: "1.0.0", + Commit: "integration", +} + +var policyData = []byte(` +{ + "inputs": [ + { + "type": "fleet-server" + } + ] +} +`) + +var initialCfgData = ` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +` + +var agentIdCfgData = ` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +` + +var badCfgData = ` +output: + elasticsearch: + hosts: 'localhost:63542' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +` + +type agentSuite struct { + suite.RunningSuite +} + +func (s *agentSuite) TestAgentMode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bulker := ftesting.SetupBulk(ctx, t) + + // add a real default fleet server policy + 
policyId := uuid.Must(uuid.NewV4()).String() + _, err := dl.CreatePolicy(ctx, bulker, model.Policy{ + PolicyId: policyId, + RevisionIdx: 1, + DefaultFleetServer: true, + Data: policyData, + }) + require.NoError(t, err) + + // add entry for enrollment key (doesn't have to be a real key) + _, err = dl.CreateEnrollmentAPIKey(ctx, bulker, model.EnrollmentApiKey{ + Name: "Default", + ApiKey: "keyvalue", + ApiKeyId: "keyid", + PolicyId: policyId, + Active: true, + }) + require.NoError(t, err) + + app := &StubApp{} + control := createAndStartControlServer(t, app) + defer control.Stop() + appState, err := control.Register(app, initialCfgData) + require.NoError(t, err) + + r, w := io.Pipe() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + agent, err := NewAgentMode(ucfg.New(), r, biInfo) + require.NoError(t, err) + err = agent.Run(ctx) + assert.NoError(t, err) + }() + + err = appState.WriteConnInfo(w) + require.NoError(t, err) + + // wait for fleet-server to report as degraded (starting mode without agent.id) + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as degraded; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // reconfigure with agent ID set + err = appState.UpdateConfig(agentIdCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // trigger update with bad configuration + err = appState.UpdateConfig(badCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as failed + ftesting.Retry(t, ctx, func(ctx 
context.Context) error { + status := app.Status() + if status != proto.StateObserved_FAILED { + return fmt.Errorf("should be reported as failed; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // reconfigure to good config + err = appState.UpdateConfig(agentIdCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // trigger stop + err = appState.Stop(10 * time.Second) + assert.NoError(t, err) + + // wait for go routine to exit + wg.Wait() +} + +func newDebugLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.DebugLevel + + log, err := logger.NewFromConfig("", loggerCfg) + require.NoError(t, err) + return log +} + +func createAndStartControlServer(t *testing.T, handler server.Handler, extraConfigs ...func(*server.Server)) *server.Server { + t.Helper() + srv, err := server.New(newDebugLogger(t), "localhost:0", handler) + require.NoError(t, err) + for _, extra := range extraConfigs { + extra(srv) + } + require.NoError(t, srv.Start()) + return srv +} + +type StubApp struct { + lock sync.RWMutex + status proto.StateObserved_Status + message string + payload map[string]interface{} +} + +func (a *StubApp) Status() proto.StateObserved_Status { + a.lock.RLock() + defer a.lock.RUnlock() + return a.status +} + +func (a *StubApp) Message() string { + a.lock.RLock() + defer a.lock.RUnlock() + return a.message +} + +func (a *StubApp) OnStatusChange(_ *server.ApplicationState, status proto.StateObserved_Status, message string, payload map[string]interface{}) { + a.lock.Lock() + defer a.lock.Unlock() + a.status = 
status + a.message = message + a.payload = payload +} diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go new file mode 100644 index 000000000..9b7cff0d7 --- /dev/null +++ b/cmd/fleet/metrics.go @@ -0,0 +1,142 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "context" + + "github.com/elastic/beats/v7/libbeat/api" + "github.com/elastic/beats/v7/libbeat/cmd/instance/metrics" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/pkg/errors" +) + +var ( + registry *monitoring.Registry + + cntHttpNew *monitoring.Uint + cntHttpClose *monitoring.Uint + + cntCheckin routeStats + cntEnroll routeStats + cntAcks routeStats + cntStatus routeStats + cntArtifacts artifactStats +) + +func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { + registry := monitoring.GetNamespace("info").GetRegistry() + if registry.Get("version") == nil { + monitoring.NewString(registry, "version").Set(f.bi.Version) + } + if registry.Get("name") == nil { + monitoring.NewString(registry, "name").Set(kServiceName) + } + + if !cfg.HTTP.Enabled { + return nil, nil + } + + // Start local api server; largely for metrics. 
+ zapStub := logger.NewZapStub("fleet-metrics") + cfgStub, err := common.NewConfigFrom(&cfg.HTTP) + if err != nil { + return nil, err + } + s, err := api.NewWithDefaultRoutes(zapStub, cfgStub, monitoring.GetNamespace) + if err != nil { + err = errors.Wrap(err, "could not start the HTTP server for the API") + } else { + s.Start() + } + + return s, err +} + +type routeStats struct { + active *monitoring.Uint + total *monitoring.Uint + rateLimit *monitoring.Uint + maxLimit *monitoring.Uint + failure *monitoring.Uint + drop *monitoring.Uint + bodyIn *monitoring.Uint + bodyOut *monitoring.Uint +} + +func (rt *routeStats) Register(registry *monitoring.Registry) { + rt.active = monitoring.NewUint(registry, "active") + rt.total = monitoring.NewUint(registry, "total") + rt.rateLimit = monitoring.NewUint(registry, "limit_rate") + rt.maxLimit = monitoring.NewUint(registry, "limit_max") + rt.failure = monitoring.NewUint(registry, "fail") + rt.drop = monitoring.NewUint(registry, "drop") + rt.bodyIn = monitoring.NewUint(registry, "body_in") + rt.bodyOut = monitoring.NewUint(registry, "body_out") +} + +func init() { + metrics.SetupMetrics(kServiceName) + registry = monitoring.Default.NewRegistry("http_server") + cntHttpNew = monitoring.NewUint(registry, "tcp_open") + cntHttpClose = monitoring.NewUint(registry, "tcp_close") + + routesRegistry := registry.NewRegistry("routes") + + cntCheckin.Register(routesRegistry.NewRegistry("checkin")) + cntEnroll.Register(routesRegistry.NewRegistry("enroll")) + cntArtifacts.Register(routesRegistry.NewRegistry("artifacts")) + cntAcks.Register(routesRegistry.NewRegistry("acks")) + cntStatus.Register(routesRegistry.NewRegistry("status")) +} + +func (rt *routeStats) IncError(err error) { + + switch { + case errors.Is(err, limit.ErrRateLimit): + rt.rateLimit.Inc() + case errors.Is(err, limit.ErrMaxLimit): + rt.maxLimit.Inc() + case errors.Is(err, context.Canceled): + rt.drop.Inc() + default: + rt.failure.Inc() + } +} + +func (rt *routeStats) 
IncStart() func() { + rt.total.Inc() + rt.active.Inc() + return rt.active.Dec +} + +type artifactStats struct { + routeStats + notFound *monitoring.Uint + throttle *monitoring.Uint +} + +func (rt *artifactStats) Register(registry *monitoring.Registry) { + rt.routeStats.Register(registry) + rt.notFound = monitoring.NewUint(registry, "not_found") + rt.throttle = monitoring.NewUint(registry, "throttle") +} + +func (rt *artifactStats) IncError(err error) { + switch { + case errors.Is(err, dl.ErrNotFound): + rt.notFound.Inc() + case errors.Is(err, ErrorThrottle): + rt.throttle.Inc() + default: + rt.routeStats.IncError(err) + } +} diff --git a/cmd/fleet/missing.txt b/cmd/fleet/missing.txt deleted file mode 100644 index 7215c197e..000000000 --- a/cmd/fleet/missing.txt +++ /dev/null @@ -1,6 +0,0 @@ -Missing stuff - -- handle upgrade and unenroll events -- audit logging on saved objects etc. -- runs as admin; doesn't drop creds. -- stats diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index d3cf5db35..8b6f6fc58 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -5,33 +5,93 @@ package fleet import ( + "net/http" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog/log" ) const ( - ROUTE_ENROLL = "/api/fleet/agents/:id" - ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" - ROUTE_ACKS = "/api/fleet/agents/:id/acks" + ROUTE_STATUS = "/api/status" + ROUTE_ENROLL = "/api/fleet/agents/:id" + ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" + ROUTE_ACKS = "/api/fleet/agents/:id/acks" + ROUTE_ARTIFACTS = "/api/fleet/artifacts/:id/:sha2" ) type Router struct { bulker bulk.Bulk + ver string ct *CheckinT et *EnrollerT + at *ArtifactT + ack *AckT + sm policy.SelfMonitor } -func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT) *httprouter.Router { +func 
NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, at *ArtifactT, ack *AckT, sm policy.SelfMonitor) *httprouter.Router { r := Router{ bulker: bulker, ct: ct, et: et, + sm: sm, + at: at, + ack: ack, + } + + routes := []struct { + method string + path string + handler httprouter.Handle + }{ + { + http.MethodGet, + ROUTE_STATUS, + r.handleStatus, + }, + { + http.MethodPost, + ROUTE_ENROLL, + r.handleEnroll, + }, + { + http.MethodPost, + ROUTE_CHECKIN, + r.handleCheckin, + }, + { + http.MethodPost, + ROUTE_ACKS, + r.handleAcks, + }, + { + http.MethodGet, + ROUTE_ARTIFACTS, + r.handleArtifacts, + }, } router := httprouter.New() - router.POST(ROUTE_ENROLL, r.handleEnroll) - router.POST(ROUTE_CHECKIN, r.handleCheckin) - router.POST(ROUTE_ACKS, r.handleAcks) + + // Install routes + for _, rte := range routes { + log.Info(). + Str("method", rte.method). + Str("path", rte.path). + Msg("fleet-server route added") + + router.Handle( + rte.method, + rte.path, + logger.HttpHandler(rte.handler), + ) + } + + log.Info().Msg("fleet-server routes set up") + return router } diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 77371b6c4..c2b03bc5e 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -15,11 +15,7 @@ const ( const ( TypePolicyChange = "POLICY_CHANGE" TypeUnenroll = "UNENROLL" -) - -const ( - FieldLastCheckin = "last_checkin" - FieldLocalMetadata = "local_metadata" + TypeUpgrade = "UPGRADE" ) const kFleetAccessRolesJSON = ` @@ -35,29 +31,6 @@ const kFleetAccessRolesJSON = ` } ` -const kFleetOutputRolesJSON = ` -{ - "fleet-output": { - "cluster": ["monitor"], - "index": [{ - "names": [ - "logs-*", - "metrics-*", - "events-*", - ".ds-logs-*", - ".ds-metrics-*", - ".ds-events-*" - ], - "privileges": [ - "write", - "create_index", - "indices:admin/auto_create" - ] - }] - } -} -` - // Wrong: no AAD; // This defeats the signature check; // can copy from one to another and will dispatch. 
@@ -98,6 +71,7 @@ type EnrollResponse struct { } type CheckinRequest struct { + Status string `json:"status"` AckToken string `json:"ack_token,omitempty"` Events []Event `json:"events"` LocalMeta json.RawMessage `json:"local_metadata"` @@ -118,24 +92,34 @@ type AckResponse struct { } type ActionResp struct { - AgentId string `json:"agent_id"` - CreatedAt string `json:"created_at"` - Data json.RawMessage `json:"data"` - Id string `json:"id"` - Type string `json:"type"` - InputId string `json:"input_id"` + AgentId string `json:"agent_id"` + CreatedAt string `json:"created_at"` + Data interface{} `json:"data"` + Id string `json:"id"` + Type string `json:"type"` + InputType string `json:"input_type"` + Timeout int64 `json:"timeout,omitempty"` } type Event struct { - Type string `json:"type"` - SubType string `json:"subtype"` - AgentId string `json:"agent_id"` - ActionId string `json:"action_id"` - PolicyId string `json:"policy_id"` - StreamId string `json:"stream_id"` - Timestamp string `json:"timestamp"` - Message string `json:"message"` - Payload string `json:"payload,omitempty"` - Data json.RawMessage `json:"data,omitempty"` - Error string `json:"error,omitempty"` + Type string `json:"type"` + SubType string `json:"subtype"` + AgentId string `json:"agent_id"` + ActionId string `json:"action_id"` + PolicyId string `json:"policy_id"` + StreamId string `json:"stream_id"` + Timestamp string `json:"timestamp"` + Message string `json:"message"` + Payload json.RawMessage `json:"payload,omitempty"` + StartedAt string `json:"started_at"` + CompletedAt string `json:"completed_at"` + ActionData json.RawMessage `json:"action_data,omitempty"` + ActionResponse json.RawMessage `json:"action_response,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Error string `json:"error,omitempty"` +} + +type StatusResponse struct { + Name string `json:"name"` + Status string `json:"status"` } diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index ce32fad2f..9fba82e1d 100644 
--- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -6,111 +6,163 @@ package fleet import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/config" + "crypto/tls" slog "log" "net" "net/http" - "github.com/elastic/fleet-server/v7/internal/pkg/rate" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" ) func diagConn(c net.Conn, s http.ConnState) { + if c == nil { + return + } + log.Trace(). Str("local", c.LocalAddr().String()). Str("remote", c.RemoteAddr().String()). Str("state", s.String()). Msg("connection state change") + + switch s { + case http.StateNew: + cntHttpNew.Inc() + case http.StateClosed: + cntHttpClose.Inc() + } } func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Server) error { - addr := cfg.BindAddress() + listeners := cfg.BindEndpoints() rdto := cfg.Timeouts.Read wrto := cfg.Timeouts.Write - mhbz := cfg.MaxHeaderByteSize + idle := cfg.Timeouts.Idle + rdhr := cfg.Timeouts.ReadHeader + mhbz := cfg.Limits.MaxHeaderByteSize bctx := func(net.Listener) context.Context { return ctx } - log.Info(). - Str("bind", addr). - Dur("rdTimeout", rdto). - Dur("wrTimeout", wrto). - Msg("Server listening") - - server := http.Server{ - Addr: addr, - ReadTimeout: rdto, - WriteTimeout: wrto, - Handler: router, - BaseContext: bctx, - ConnState: diagConn, - MaxHeaderBytes: mhbz, - ErrorLog: errLogger(), - } + errChan := make(chan error) + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() + + for _, addr := range listeners { + log.Info(). + Str("bind", addr). + Dur("rdTimeout", rdto). + Dur("wrTimeout", wrto). 
+ Msg("server listening") + + server := http.Server{ + Addr: addr, + ReadTimeout: rdto, + WriteTimeout: wrto, + IdleTimeout: idle, + ReadHeaderTimeout: rdhr, + Handler: router, + BaseContext: bctx, + ConnState: diagConn, + MaxHeaderBytes: mhbz, + ErrorLog: errLogger(), + } - forceCh := make(chan struct{}) - defer close(forceCh) - - // handler to close server - go func() { - select { - case <-ctx.Done(): - log.Debug().Msg("Force server close on ctx.Done()") - server.Close() - case <-forceCh: - log.Debug().Msg("Go routine forced closed on exit") + forceCh := make(chan struct{}) + defer close(forceCh) + + // handler to close server + go func() { + select { + case <-ctx.Done(): + log.Debug().Msg("force server close on ctx.Done()") + server.Close() + case <-forceCh: + log.Debug().Msg("go routine forced closed on exit") + } + }() + + var listenCfg net.ListenConfig + + ln, err := listenCfg.Listen(ctx, "tcp", addr) + if err != nil { + return err } - }() - ln, err := makeListener(ctx, addr, cfg) - if err != nil { - return err - } + // Bind the deferred Close() to the stack variable to handle case where 'ln' is wrapped + defer func() { ln.Close() }() - defer ln.Close() + // Conn Limiter must be before the TLS handshake in the stack; + // The server should not eat the cost of the handshake if there + // is no capacity to service the connection. + // Also, it appears the HTTP2 implementation depends on the tls.Listener + // being at the top of the stack. + ln = wrapConnLimitter(ctx, ln, cfg) - // TODO: Use tls.Config to properly lock down tls connection - keyFile := cfg.TLS.Key - certFile := cfg.TLS.Cert + if cfg.TLS != nil && cfg.TLS.IsEnabled() { + commonTlsCfg, err := tlscommon.LoadTLSServerConfig(cfg.TLS) + if err != nil { + return err + } + server.TLSConfig = commonTlsCfg.ToConfig() + + // Must enable http/2 in the configuration explicitly. 
+ // (see https://golang.org/pkg/net/http/#Server.Serve) + server.TLSConfig.NextProtos = []string{"h2", "http/1.1"} + + ln = tls.NewListener(ln, server.TLSConfig) + + } else { + log.Warn().Msg("Exposed over insecure HTTP; enablement of TLS is strongly recommended") + } + + log.Debug().Msgf("Listening on %s", addr) + + go func(ctx context.Context, errChan chan error, ln net.Listener) { + if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { + errChan <- err + } + }(cancelCtx, errChan, ln) - if keyFile != "" || certFile != "" { - return server.ServeTLS(ln, certFile, keyFile) } - if err := server.Serve(ln); err != nil && err != context.Canceled { - return err + select { + case err := <-errChan: + if err != context.Canceled { + return err + } + case <-cancelCtx.Done(): } return nil } -func makeListener(ctx context.Context, addr string, cfg *config.Server) (net.Listener, error) { - // Create listener - ln, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } +func wrapConnLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) net.Listener { + hardLimit := cfg.Limits.MaxConnections - rateLimitBurst := cfg.RateLimitBurst - rateLimitInterval := cfg.RateLimitInterval + if hardLimit != 0 { + log.Info(). + Int("hardConnLimit", hardLimit). 
+ Msg("server hard connection limiter installed") - if rateLimitInterval != 0 { - log.Info().Dur("interval", rateLimitInterval).Int("burst", rateLimitBurst).Msg("Server rate limiter installed") - ln = rate.NewRateListener(ctx, ln, rateLimitBurst, rateLimitInterval) + ln = limit.Listener(ln, hardLimit) } else { - log.Info().Msg("Server connection rate limiter disabled") + log.Info().Msg("server hard connection limiter disabled") } - return ln, err + return ln } type stubLogger struct { } func (s *stubLogger) Write(p []byte) (n int, err error) { - log.Error().Bytes("msg", p).Send() + log.Error().Bytes(logger.EcsMessage, p).Send() return len(p), nil } diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 8dc86fbb3..f8abce330 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration // +build integration package fleet @@ -9,11 +10,11 @@ package fleet import ( "bytes" "context" + "encoding/json" "fmt" "io/ioutil" "net/http" "path" - "strings" "testing" "time" @@ -24,10 +25,11 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/build" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" + "github.com/elastic/fleet-server/v7/internal/pkg/status" ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) @@ -46,7 +48,7 @@ func (s *tserver) baseUrl() string { input := s.cfg.Inputs[0] tls := input.Server.TLS schema := "http" - if tls.Key != "" || tls.Cert != "" { + if tls != nil && tls.IsEnabled() { schema = "https" } return fmt.Sprintf("%s://%s:%d", schema, input.Server.Host, input.Server.Port) @@ -62,12 +64,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { return nil, err } - c, err := cache.New() - if err != nil { - return nil, err - } - - logger.Init(cfg) + logger.Init(cfg, "fleet-server") port, err := ftesting.FreePort() if err != nil { @@ -81,7 +78,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { cfg.Inputs[0].Server = *srvcfg log.Info().Uint16("port", port).Msg("Test fleet server") - srv, err := NewFleetServer(cfg, c, serverVersion) + srv, err := NewFleetServer(cfg, build.Info{Version: serverVersion}, status.NewLog()) if err != nil { return nil, err } @@ -104,7 +101,7 @@ func (s *tserver) waitServerUp(ctx context.Context, dur time.Duration) error { start := time.Now() cli := cleanhttp.DefaultClient() for { - res, err := cli.Get(s.baseUrl()) + res, err := cli.Get(s.baseUrl() + "/api/status") if err != nil { if time.Since(start) > dur { return err @@ -171,7 +168,16 @@ func TestServerUnauthorized(t 
*testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - diff = cmp.Diff("no authorization header\n", string(raw)) + var resp errResp + err = json.Unmarshal(raw, &resp) + if err != nil { + t.Fatal(err) + } + diff = cmp.Diff(400, resp.StatusCode) + if diff != "" { + t.Fatal(diff) + } + diff = cmp.Diff("BadRequest", resp.Error) if diff != "" { t.Fatal(diff) } @@ -180,7 +186,7 @@ func TestServerUnauthorized(t *testing.T) { // Unauthorized, expecting error from /_security/_authenticate t.Run("unauthorized", func(t *testing.T) { - const expectedErrResponsePrefix = `Fail Auth: [401 Unauthorized]` + for _, u := range agenturls { req, err := http.NewRequest("POST", u, bytes.NewBuffer([]byte("{}"))) require.NoError(t, err) @@ -197,8 +203,18 @@ func TestServerUnauthorized(t *testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - if !strings.HasPrefix(string(raw), expectedErrResponsePrefix) { - t.Fatalf("unexpected error: %s", string(raw)) + var resp errResp + err = json.Unmarshal(raw, &resp) + if err != nil { + t.Fatal(err) + } + diff = cmp.Diff(400, resp.StatusCode) + if diff != "" { + t.Fatal(diff) + } + diff = cmp.Diff("BadRequest", resp.Error) + if diff != "" { + t.Fatal(diff) } } }) diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 38a905548..797c95fdf 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build !integration // +build !integration package fleet @@ -16,6 +17,7 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/monitor/mock" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -33,17 +35,18 @@ func TestRunServer(t *testing.T) { cfg.Host = "localhost" cfg.Port = port - c, err := cache.New() + verCon := mustBuildConstraints("8.0.0") + c, err := cache.New(cache.Config{NumCounters: 100, MaxCost: 100000}) require.NoError(t, err) bulker := ftesting.MockBulk{} pim := mock.NewMockIndexMonitor() - pm := policy.NewMonitor(bulker, pim, kPolicyThrottle) - bc := NewBulkCheckin(nil) - ct := NewCheckinT(nil, c, bc, pm, nil, nil, nil, nil) - et, err := NewEnrollerT(cfg, nil, c) + pm := policy.NewMonitor(bulker, pim, 5*time.Millisecond) + bc := checkin.NewBulk(nil) + ct := NewCheckinT(verCon, cfg, c, bc, pm, nil, nil, nil, nil) + et, err := NewEnrollerT(verCon, cfg, nil, c) require.NoError(t, err) - router := NewRouter(bulker, ct, et) + router := NewRouter(bulker, ct, et, nil, nil, nil) errCh := make(chan error) var wg sync.WaitGroup diff --git a/cmd/fleet/userAgent.go b/cmd/fleet/userAgent.go new file mode 100644 index 000000000..2c960b886 --- /dev/null +++ b/cmd/fleet/userAgent.go @@ -0,0 +1,109 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "errors" + "fmt" + "math" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-version" + "github.com/rs/zerolog" +) + +const ( + // MinVersion is the minimum version an Elastic Agent must be to communicate + MinVersion = "7.13" + + userAgentPrefix = "elastic agent " +) + +var ( + ErrInvalidUserAgent = errors.New("user-agent is invalid") + ErrUnsupportedVersion = errors.New("version is not supported") +) + +// buildVersionConstraint turns the version into a constraint to ensure that the connecting Elastic Agent's are +// a supported version. +func buildVersionConstraint(verStr string) (version.Constraints, error) { + ver, err := version.NewVersion(verStr) + if err != nil { + return nil, err + } + verStr = maximizePatch(ver) + return version.NewConstraint(fmt.Sprintf(">= %s, <= %s", MinVersion, verStr)) +} + +// maximizePatch turns the version into a string that has the patch value set to the maximum integer. +// +// Used to allow the Elastic Agent to be at a higher patch version than the Fleet Server, but require that the +// Elastic Agent is not higher in MAJOR or MINOR. +func maximizePatch(ver *version.Version) string { + segments := ver.Segments() + if len(segments) > 2 { + segments = segments[:2] + } + segments = append(segments, math.MaxInt32) + segStrs := make([]string, 0, len(segments)) + for _, segment := range segments { + segStrs = append(segStrs, strconv.Itoa(segment)) + } + return strings.Join(segStrs, ".") +} + +// validateUserAgent validates that the User-Agent of the connecting Elastic Agent is valid and that the version is +// supported for this Fleet Server. +func validateUserAgent(zlog zerolog.Logger, r *http.Request, verConst version.Constraints) (string, error) { + userAgent := r.Header.Get("User-Agent") + + zlog = zlog.With().Str("userAgent", userAgent).Logger() + + if userAgent == "" { + zlog.Info(). + Err(ErrInvalidUserAgent). 
+ Msg("empty User-Agent") + return "", ErrInvalidUserAgent + } + + userAgent = strings.ToLower(userAgent) + if !strings.HasPrefix(userAgent, userAgentPrefix) { + zlog.Info(). + Err(ErrInvalidUserAgent). + Str("targetPrefix", userAgentPrefix). + Msg("invalid user agent prefix") + return "", ErrInvalidUserAgent + } + + // Trim "elastic agent " prefix + s := strings.TrimPrefix(userAgent, userAgentPrefix) + + // Split the version to accommodate versions with suffixes such as v8.0.0-snapshot v8.0.0-alpha1 + verSep := strings.Split(s, "-") + + // Trim leading and trailing spaces + verStr := strings.TrimSpace(verSep[0]) + + ver, err := version.NewVersion(verStr) + if err != nil { + zlog.Info(). + Err(err). + Str("verStr", verStr). + Msg("invalid user agent version string") + return "", ErrInvalidUserAgent + } + if !verConst.Check(ver) { + zlog.Info(). + Err(ErrUnsupportedVersion). + Str("verStr", verStr). + Str("constraints", verConst.String()). + Msg("unsupported user agent version") + return "", ErrUnsupportedVersion + } + + return ver.String(), nil +} diff --git a/cmd/fleet/userAgent_test.go b/cmd/fleet/userAgent_test.go new file mode 100644 index 000000000..8beb40ed0 --- /dev/null +++ b/cmd/fleet/userAgent_test.go @@ -0,0 +1,130 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "net/http/httptest" + "testing" + + "github.com/hashicorp/go-version" + "github.com/rs/zerolog/log" +) + +func TestValidateUserAgent(t *testing.T) { + tests := []struct { + userAgent string + verCon version.Constraints + err error + }{ + { + userAgent: "", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "bad value", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "eLaStIc AGeNt", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "eLaStIc AGeNt v7.10.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.11.1", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.12.5", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("7.13.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("7.13.1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.13.1", + verCon: mustBuildConstraints("7.13.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.14.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("8.0.0-alpha1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-alpha1", + verCon: mustBuildConstraints("8.0.0-alpha1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-alpha1", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-anything", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.15.0-anything", + verCon: 
mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.15.0-anything", + verCon: mustBuildConstraints("8.0.0-beta1"), + err: nil, + }, + } + for _, tr := range tests { + t.Run(tr.userAgent, func(t *testing.T) { + req := httptest.NewRequest("GET", "/", nil) + req.Header.Set("User-Agent", tr.userAgent) + _, res := validateUserAgent(log.Logger, req, tr.verCon) + if tr.err != res { + t.Fatalf("err mismatch: %v != %v", tr.err, res) + } + }) + } +} + +func mustBuildConstraints(verStr string) version.Constraints { + con, err := buildVersionConstraint(verStr) + if err != nil { + panic(err) + } + return con +} diff --git a/dev-tools/buildlimits/buildlimits.go b/dev-tools/buildlimits/buildlimits.go new file mode 100644 index 000000000..22f007bca --- /dev/null +++ b/dev-tools/buildlimits/buildlimits.go @@ -0,0 +1,264 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "text/template" + + "github.com/elastic/beats/v7/licenses" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" +) + +var ( + input string + output string + license string +) + +func init() { + flag.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin") + flag.StringVar(&output, "out", "-", "Output path. \"-\" means writing to stdout") + flag.StringVar(&license, "license", "Elastic", "License header for generated file.") +} + +var tmpl = template.Must(template.New("specs").Parse(` +{{ .License }} +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. 
+ +package config + +import ( + "math" + "runtime" + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" + "github.com/elastic/go-ucfg/yaml" + "github.com/pbnjay/memory" + "github.com/pkg/errors" +) + +const ( + defaultCacheNumCounters = 500000 // 10x times expected count + defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size + + defaultMaxConnections = 0 // no limit + defaultPolicyThrottle = time.Millisecond * 5 + + defaultCheckinInterval = time.Millisecond + defaultCheckinBurst = 1000 + defaultCheckinMax = 0 + defaultCheckinMaxBody = 1024 * 1024 + + defaultArtifactInterval = time.Millisecond * 5 + defaultArtifactBurst = 25 + defaultArtifactMax = 50 + defaultArtifactMaxBody = 0 + + defaultEnrollInterval = time.Millisecond * 10 + defaultEnrollBurst = 100 + defaultEnrollMax = 50 + defaultEnrollMaxBody = 1024 * 512 + + defaultAckInterval = time.Millisecond * 10 + defaultAckBurst = 100 + defaultAckMax = 50 + defaultAckMaxBody = 1024 * 1024 * 2 +) + +type valueRange struct { + Min int ` + "`config:\"min\"`" + ` + Max int ` + "`config:\"max\"`" + ` +} + +type envLimits struct { + RAM valueRange ` + "`config:\"ram\"`" + ` + Server *serverLimitDefaults ` + "`config:\"server_limits\"`" + ` + Cache *cacheLimits ` + "`config:\"cache_limits\"`" + ` +} + +func defaultEnvLimits() *envLimits { + return &envLimits{ + RAM: valueRange{ + Min: 0, + Max: int(getMaxInt()), + }, + Server: defaultserverLimitDefaults(), + Cache: defaultCacheLimits(), + } +} + +type cacheLimits struct { + NumCounters int64 ` + "`config:\"num_counters\"`" + ` + MaxCost int64 ` + "`config:\"max_cost\"`" + ` +} + +func defaultCacheLimits() *cacheLimits { + return &cacheLimits{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + } +} + +type limit struct { + Interval time.Duration ` + "`config:\"interval\"`" + ` + Burst int ` + "`config:\"burst\"`" + ` + Max int64 ` + "`config:\"max\"`" + ` + MaxBody int64 ` + 
"`config:\"max_body_byte_size\"`" + ` +} + +type serverLimitDefaults struct { + PolicyThrottle time.Duration ` + "`config:\"policy_throttle\"`" + ` + MaxConnections int ` + "`config:\"max_connections\"`" + ` + + CheckinLimit limit ` + "`config:\"checkin_limit\"`" + ` + ArtifactLimit limit ` + "`config:\"artifact_limit\"`" + ` + EnrollLimit limit ` + "`config:\"enroll_limit\"`" + ` + AckLimit limit ` + "`config:\"ack_limit\"`" + ` +} + +func defaultserverLimitDefaults() *serverLimitDefaults { + return &serverLimitDefaults{ + PolicyThrottle: defaultPolicyThrottle, + MaxConnections: defaultMaxConnections, + + CheckinLimit: limit{ + Interval: defaultCheckinInterval, + Burst: defaultCheckinBurst, + Max: defaultCheckinMax, + MaxBody: defaultCheckinMaxBody, + }, + ArtifactLimit: limit{ + Interval: defaultArtifactInterval, + Burst: defaultArtifactBurst, + Max: defaultArtifactMax, + MaxBody: defaultArtifactMaxBody, + }, + EnrollLimit: limit{ + Interval: defaultEnrollInterval, + Burst: defaultEnrollBurst, + Max: defaultEnrollMax, + MaxBody: defaultEnrollMaxBody, + }, + AckLimit: limit{ + Interval: defaultAckInterval, + Burst: defaultAckBurst, + Max: defaultAckMax, + MaxBody: defaultAckMaxBody, + }, + } +} + +var defaults []*envLimits + +func init() { + // Packed Files + {{ range $i, $f := .Files -}} + // {{ $f }} + {{ end -}} + unpacked := packer.MustUnpack("{{ .Pack }}") + + for f, v := range unpacked { + cfg, err := yaml.NewConfig(v, DefaultOptions...) 
+ if err != nil { + panic(errors.Wrap(err, "Cannot read spec from "+f)) + } + + l := defaultEnvLimits() + if err := cfg.Unpack(&l, DefaultOptions...); err != nil { + panic(errors.Wrap(err, "Cannot unpack spec from "+f)) + } + + defaults = append(defaults, l) + } +} + +func loadLimits() *envLimits { + ramSize := int(memory.TotalMemory() / 1024 / 1024) + return loadLimitsForRam(ramSize) +} + +func loadLimitsForRam(currentRAM int) *envLimits { + for _, l := range defaults { + // get max possible config for current env + if l.RAM.Min < currentRAM && currentRAM <= l.RAM.Max { + return l + } + } + + return defaultEnvLimits() +} + +func getMaxInt() int64 { + if strings.HasSuffix(runtime.GOARCH, "64") { + return math.MaxInt64 + } + return math.MaxInt32 +} + +`)) + +func main() { + flag.Parse() + + if len(input) == 0 { + fmt.Fprintln(os.Stderr, "Invalid input source") + os.Exit(1) + } + + l, err := licenses.Find(license) + if err != nil { + fmt.Fprintf(os.Stderr, "problem to retrieve the license, error: %+v", err) + os.Exit(1) + return + } + + data, err := gen(input, l) + if err != nil { + fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err) + os.Exit(1) + } + + if output == "-" { + os.Stdout.Write(data) + return + } else { + ioutil.WriteFile(output, data, 0640) + } + + return +} + +func gen(path string, l string) ([]byte, error) { + pack, files, err := packer.Pack(input) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + tmpl.Execute(&buf, struct { + Pack string + Files []string + License string + }{ + Pack: pack, + Files: files, + License: l, + }) + + formatted, err := format.Source(buf.Bytes()) + if err != nil { + return nil, err + } + + return formatted, nil +} diff --git a/dev-tools/cherrypick_pr b/dev-tools/cherrypick_pr new file mode 100755 index 000000000..cfa73a0e6 --- /dev/null +++ b/dev-tools/cherrypick_pr @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +"""Cherry pick and backport a PR""" +from __future__ import print_function + 
+from builtins import input +import sys +import os +import argparse +from os.path import expanduser +import re +from subprocess import check_call, call, check_output +import requests + +usage = """ +Example usage: + +./dev-tools/cherrypick_pr --create_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 + +In case of backporting errors, fix them, then run: + +git cherry-pick --continue +./dev-tools/cherrypick_pr --create_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 --continue + +This script does the following: + +* cleanups both from_branch and to_branch (warning: drops local changes) +* creates a temporary branch named something like "branch_2565" +* calls the git cherry-pick command in this branch +* after fixing the merge errors (if needed), pushes the branch to your + remote +* if the --create_pr flag is used, it uses the GitHub API to create the PR + for you. Note that this requires you to have a Github token with the + public_repo scope in the `~/.elastic/github.token` file. This token + should be also authorized to Elastic organization so as to work with single-sign-on. + (see https://help.github.com/en/articles/authorizing-a-personal-access-token-for-use-with-saml-single-sign-on) + +Note that you need to take the commit hashes from `git log` on the +from_branch, copying the IDs from Github doesn't work in case we squashed the +PR. +""" + + +def main(): + """Main""" + parser = argparse.ArgumentParser( + description="Creates a PR for cherry-picking commits", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=usage) + parser.add_argument("to_branch", + help="To branch (e.g 7.x)") + parser.add_argument("pr_number", + help="The PR number being merged (e.g. 2345)") + parser.add_argument("commit_hashes", metavar="hash", nargs="+", + help="The commit hashes to cherry pick." + + " You can specify multiple.") + parser.add_argument("--yes", action="store_true", + help="Assume yes. 
Warning: discards local changes.") + parser.add_argument("--continue", action="store_true", + help="Continue after fixing merging errors.") + parser.add_argument("--from_branch", default="master", + help="From branch") + parser.add_argument("--create_pr", action="store_true", + help="Create a PR using the Github API " + + "(requires token in ~/.elastic/github.token)") + parser.add_argument("--diff", action="store_true", + help="Display the diff before pushing the PR") + parser.add_argument("--remote", default="", + help="Which remote to push the backport branch to") + parser.add_argument("--zube-team", default="", + help="Team the PR belongs to") + parser.add_argument("--keep-backport-label", action="store_true", + help="Preserve label needs_backport in original PR") + args = parser.parse_args() + + print(args) + + tmp_branch = "backport_{}_{}".format(args.pr_number, args.to_branch) + + if not vars(args)["continue"]: + if not args.yes and input("This will destroy all local changes. " + + "Continue? [y/n]: ") != "y": + return 1 + check_call("git reset --hard", shell=True) + check_call("git clean -df", shell=True) + check_call("git fetch", shell=True) + + check_call("git checkout {}".format(args.from_branch), shell=True) + check_call("git pull", shell=True) + + check_call("git checkout {}".format(args.to_branch), shell=True) + check_call("git pull", shell=True) + + call("git branch -D {} > /dev/null".format(tmp_branch), shell=True) + check_call("git checkout -b {}".format(tmp_branch), shell=True) + if call("git cherry-pick -x {}".format(" ".join(args.commit_hashes)), + shell=True) != 0: + print("Looks like you have cherry-pick errors.") + print("Fix them, then run: ") + print(" git cherry-pick --continue") + print(" {} --continue".format(" ".join(sys.argv))) + return 1 + + if len(check_output("git status -s", shell=True).strip()) > 0: + print("Looks like you have uncommitted changes." 
+ + " Please execute first: git cherry-pick --continue") + return 1 + + if len(check_output("git log HEAD...{}".format(args.to_branch), + shell=True).strip()) == 0: + print("No commit to push") + return 1 + + if args.diff: + call("git diff {}".format(args.to_branch), shell=True) + if input("Continue? [y/n]: ") != "y": + print("Aborting cherry-pick.") + return 1 + + print("Ready to push branch.") + + remote = args.remote + if not remote: + remote = input("To which remote should I push? (your fork): ") + + call("git push {} :{} > /dev/null".format(remote, tmp_branch), + shell=True) + check_call("git push --set-upstream {} {}" + .format(remote, tmp_branch), shell=True) + if not args.create_pr: + print("Done. Open PR by following this URL: \n\t" + + "https://github.com/elastic/fleet-server/compare/{}...{}:{}?expand=1" + .format(args.to_branch, remote, tmp_branch)) + else: + token = open(expanduser("~/.elastic/github.token"), "r").read().strip() + base = "https://api.github.com/repos/elastic/fleet-server" + session = requests.Session() + session.headers.update({"Authorization": "token " + token}) + + original_pr = session.get(base + "/pulls/" + args.pr_number).json() + + # get the github username from the remote where we pushed + remote_url = check_output("git remote get-url {}".format(remote), + shell=True) + remote_user = re.search("github.com[:/](.+)/fleet-server", str(remote_url)).group(1) + + # create PR + request = session.post(base + "/pulls", json=dict( + title="Cherry-pick #{} to {}: {}".format(args.pr_number, args.to_branch, original_pr["title"]), + head=remote_user + ":" + tmp_branch, + base=args.to_branch, + body="Cherry-pick of PR #{} to {} branch. 
Original message: \n\n{}" + .format(args.pr_number, args.to_branch, original_pr["body"]) + )) + if request.status_code > 299: + print("Creating PR failed: {}".format(request.json())) + sys.exit(1) + new_pr = request.json() + + # add labels + labels = ["backport"] + + zube_teams = zube_team_labels(original_pr) + if args.zube_team: + resp = session.get(base + "/labels/Team:"+args.zube_team) + if resp.status_code != 200: + print("Cannot find team label", resp.text) + sys.exit(1) + zube_teams = ["Team:" + args.zube_team] + + if len(zube_teams) > 0: + labels += zube_teams + labels.append("[zube]: In Review") + else: + labels.append("review") + + session.post( + base + "/issues/{}/labels".format(new_pr["number"]), json=labels) + + if not args.keep_backport_label: + # remove needs backport label from the original PR + session.delete(base + "/issues/{}/labels/needs_backport".format(args.pr_number)) + + # get version and set a version label on the original PR + version = get_version(os.getcwd()) + if version: + session.post( + base + "/issues/{}/labels".format(args.pr_number), json=["v" + version]) + + print("\nDone. PR created: {}".format(new_pr["html_url"])) + print("Please go and check it and add the review tags") + +def get_version(repo_dir): + pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P.*)"') + with open(os.path.join(repo_dir, "main.go"), "r") as f: + for line in f: + match = pattern.match(line) + if match: + return match.group('version') + +def zube_team_labels(pr): + teams = [] + for label in pr.get('labels', []): + name = label.get('name', '') + if name.startswith('Team:'): + teams.append(name) + return teams + +if __name__ == "__main__": + sys.exit(main()) diff --git a/dev-tools/common.bash b/dev-tools/common.bash index 9f8ac1e1e..1e5aedfeb 100644 --- a/dev-tools/common.bash +++ b/dev-tools/common.bash @@ -34,18 +34,18 @@ get_go_version() { fi } -# install_gimme -# Install gimme to HOME/bin. -install_gimme() { - # Install gimme - if [ ! 
-f "${HOME}/bin/gimme" ]; then - mkdir -p ${HOME}/bin - curl -sL -o ${HOME}/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/v1.1.0/gimme - chmod +x ${HOME}/bin/gimme +# install_gvm +# Install gvm to /usr/local/bin. +# To read more about installing gvm in other platforms: https://github.com/andrewkroh/gvm#installation +install_gvm() { + # Install gvm + if [ ! -f "/usr/local/bin/gvm" ]; then + curl -sL -o /usr/local/bin/gvm https://github.com/andrewkroh/gvm/releases/download/v0.3.0/gvm-linux-amd64 + chmod +x /usr/local/bin/gvm fi - GIMME="${HOME}/bin/gimme" - debug "Gimme version $(${GIMME} version)" + GVM="/usr/local/bin/gvm" + debug "Gvm version $(${GVM} --version)" } # setup_go_root "version" @@ -55,11 +55,10 @@ install_gimme() { setup_go_root() { local version=${1} - install_gimme + install_gvm # Setup GOROOT and add go to the PATH. - ${GIMME} "${version}" > /dev/null - source "${HOME}/.gimme/envs/go${version}.env" 2> /dev/null + eval "$(${GVM} ${version})" debug "$(go version)" } diff --git a/dev-tools/dependencies-report b/dev-tools/dependencies-report old mode 100644 new mode 100755 index a824d0f4b..9492a9d59 --- a/dev-tools/dependencies-report +++ b/dev-tools/dependencies-report @@ -34,7 +34,9 @@ done go mod tidy go mod download -go list -m -json all $@ | go run go.elastic.co/go-licence-detector \ +GOPATH=`go env GOPATH` +env GOBIN=$GOPATH/bin/ go install go.elastic.co/go-licence-detector@v0.4.0 +go list -m -json all $@ | $GOPATH/bin/go-licence-detector \ -includeIndirect \ -rules "$SRCPATH/notice/rules.json" \ -overrides "$SRCPATH/notice/overrides.json" \ diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index b56cca78c..c6317a44e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=8.0.0-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-f2941f42-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file 
diff --git a/dev-tools/integration/main.go b/dev-tools/integration/main.go deleted file mode 100644 index 3484529ad..000000000 --- a/dev-tools/integration/main.go +++ /dev/null @@ -1,2483 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package main - -import ( - "context" - "errors" - "fmt" - - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" - - "github.com/rs/zerolog/log" -) - -func checkErr(err error) { - if err != nil { - panic(err) - } -} - -// Setup for integration testing -// Create the indices and data streams -func main() { - fmt.Println("Setting up the indices") - - cfg, err := config.LoadFile("fleet-server.yml") - checkErr(err) - - ctx := context.Background() - es, err := es.NewClient(ctx, cfg) - checkErr(err) - - err = esboot.EnsureESIndices(ctx, es) - checkErr(err) - - // Create .kibana index for integration tests - // This temporarily until all the parts are unplugged from .kibana - // Otherwise the fleet server fails to start at the moment - const name = ".kibana" - err = esboot.EnsureIndex(ctx, es, name, kibanaMapping) - if errors.Is(err, esboot.ErrResourceAlreadyExists) { - log.Info().Str("name", name).Msg("Index already exists") - err = nil - } - checkErr(err) -} - -const kibanaMapping = `{ - "dynamic" : "strict", - "properties" : { - "action" : { - "properties" : { - "actionTypeId" : { - "type" : "keyword" - }, - "config" : { - "type" : "object", - "enabled" : false - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "secrets" : { - "type" : "binary" - } - } - }, - "action_task_params" : { - "properties" : { - "actionId" : { - "type" : "keyword" - }, - "apiKey" : { - 
"type" : "binary" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alert" : { - "properties" : { - "actions" : { - "type" : "nested", - "properties" : { - "actionRef" : { - "type" : "keyword" - }, - "actionTypeId" : { - "type" : "keyword" - }, - "group" : { - "type" : "keyword" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alertTypeId" : { - "type" : "keyword" - }, - "apiKey" : { - "type" : "binary" - }, - "apiKeyOwner" : { - "type" : "keyword" - }, - "consumer" : { - "type" : "keyword" - }, - "createdAt" : { - "type" : "date" - }, - "createdBy" : { - "type" : "keyword" - }, - "enabled" : { - "type" : "boolean" - }, - "executionStatus" : { - "properties" : { - "error" : { - "properties" : { - "message" : { - "type" : "keyword" - }, - "reason" : { - "type" : "keyword" - } - } - }, - "lastExecutionDate" : { - "type" : "date" - }, - "status" : { - "type" : "keyword" - } - } - }, - "meta" : { - "properties" : { - "versionApiKeyLastmodified" : { - "type" : "keyword" - } - } - }, - "muteAll" : { - "type" : "boolean" - }, - "mutedInstanceIds" : { - "type" : "keyword" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "params" : { - "type" : "object", - "enabled" : false - }, - "schedule" : { - "properties" : { - "interval" : { - "type" : "keyword" - } - } - }, - "scheduledTaskId" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "throttle" : { - "type" : "keyword" - }, - "updatedAt" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "keyword" - } - } - }, - "api_key_pending_invalidation" : { - "properties" : { - "apiKeyId" : { - "type" : "keyword" - }, - "createdAt" : { - "type" : "date" - } - } - }, - "apm-indices" : { - "properties" : { - "apm_oss" : { - "properties" : { - "errorIndices" : { - "type" : "keyword" - }, - "metricsIndices" : { - "type" : "keyword" - }, - "onboardingIndices" : { - "type" : "keyword" - }, - 
"sourcemapIndices" : { - "type" : "keyword" - }, - "spanIndices" : { - "type" : "keyword" - }, - "transactionIndices" : { - "type" : "keyword" - } - } - } - } - }, - "apm-telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "app_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "application_usage_daily" : { - "dynamic" : "false", - "properties" : { - "timestamp" : { - "type" : "date" - } - } - }, - "application_usage_totals" : { - "type" : "object", - "dynamic" : "false" - }, - "application_usage_transactional" : { - "type" : "object", - "dynamic" : "false" - }, - "background-session" : { - "properties" : { - "created" : { - "type" : "date" - }, - "expires" : { - "type" : "date" - }, - "idMapping" : { - "type" : "object", - "enabled" : false - }, - "initialState" : { - "type" : "object", - "enabled" : false - }, - "name" : { - "type" : "keyword" - }, - "restoreState" : { - "type" : "object", - "enabled" : false - }, - "status" : { - "type" : "keyword" - } - } - }, - "book" : { - "properties" : { - "author" : { - "type" : "keyword" - }, - "readIt" : { - "type" : "boolean" - }, - "title" : { - "type" : "keyword" - } - } - }, - "canvas-element" : { - "dynamic" : "false", - "properties" : { - "@created" : { - "type" : "date" - }, - "@timestamp" : { - "type" : "date" - }, - "content" : { - "type" : "text" - }, - "help" : { - "type" : "text" - }, - "image" : { - "type" : "text" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "canvas-workpad" : { - "dynamic" : "false", - "properties" : { - "@created" : { - "type" : "date" - }, - "@timestamp" : { - "type" : "date" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "canvas-workpad-template" : { - "dynamic" : "false", - "properties" : { - "help" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "name" : { - "type" : "text", - 
"fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "tags" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "template_key" : { - "type" : "keyword" - } - } - }, - "cases" : { - "properties" : { - "closed_at" : { - "type" : "date" - }, - "closed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "connector" : { - "properties" : { - "fields" : { - "properties" : { - "key" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "date" - }, - "created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "description" : { - "type" : "text" - }, - "external_service" : { - "properties" : { - "connector_id" : { - "type" : "keyword" - }, - "connector_name" : { - "type" : "keyword" - }, - "external_id" : { - "type" : "keyword" - }, - "external_title" : { - "type" : "text" - }, - "external_url" : { - "type" : "text" - }, - "pushed_at" : { - "type" : "date" - }, - "pushed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "status" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "title" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-comments" : { - "properties" : { - "alertId" : { - "type" : "keyword" - }, - "comment" : { - "type" : "text" - }, - "created_at" : { - "type" : "date" - }, - 
"created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "index" : { - "type" : "keyword" - }, - "pushed_at" : { - "type" : "date" - }, - "pushed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "type" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-configure" : { - "properties" : { - "closure_type" : { - "type" : "keyword" - }, - "connector" : { - "properties" : { - "fields" : { - "properties" : { - "key" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "date" - }, - "created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-user-actions" : { - "properties" : { - "action" : { - "type" : "keyword" - }, - "action_at" : { - "type" : "date" - }, - "action_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "action_field" : { - "type" : "keyword" - }, - "new_value" : { - "type" : "text" - }, - "old_value" : { - "type" : "text" - } - } - }, - "config" : { - "dynamic" : "false", - "properties" : { - "buildNum" : { - 
"type" : "keyword" - } - } - }, - "dashboard" : { - "properties" : { - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text", - "index" : false - } - } - }, - "optionsJSON" : { - "type" : "text", - "index" : false - }, - "panelsJSON" : { - "type" : "text", - "index" : false - }, - "refreshInterval" : { - "properties" : { - "display" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "pause" : { - "type" : "boolean", - "doc_values" : false, - "index" : false - }, - "section" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "value" : { - "type" : "integer", - "index" : false, - "doc_values" : false - } - } - }, - "timeFrom" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "timeRestore" : { - "type" : "boolean", - "doc_values" : false, - "index" : false - }, - "timeTo" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "endpoint:user-artifact" : { - "properties" : { - "body" : { - "type" : "binary" - }, - "compressionAlgorithm" : { - "type" : "keyword", - "index" : false - }, - "created" : { - "type" : "date", - "index" : false - }, - "decodedSha256" : { - "type" : "keyword", - "index" : false - }, - "decodedSize" : { - "type" : "long", - "index" : false - }, - "encodedSha256" : { - "type" : "keyword" - }, - "encodedSize" : { - "type" : "long", - "index" : false - }, - "encryptionAlgorithm" : { - "type" : "keyword", - "index" : false - }, - "identifier" : { - "type" : "keyword" - } - } - }, - "endpoint:user-artifact-manifest" : { - "properties" : { - "created" : { - "type" : "date", - "index" : false - }, - "ids" : { - "type" : "keyword", - "index" : false - }, - "schemaVersion" : { - "type" : "keyword" - }, - 
"semanticVersion" : { - "type" : "keyword", - "index" : false - } - } - }, - "enterprise_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "epm-packages" : { - "properties" : { - "es_index_patterns" : { - "type" : "object", - "enabled" : false - }, - "install_source" : { - "type" : "keyword" - }, - "install_started_at" : { - "type" : "date" - }, - "install_status" : { - "type" : "keyword" - }, - "install_version" : { - "type" : "keyword" - }, - "installed_es" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "installed_kibana" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "internal" : { - "type" : "boolean" - }, - "name" : { - "type" : "keyword" - }, - "removable" : { - "type" : "boolean" - }, - "version" : { - "type" : "keyword" - } - } - }, - "exception-list" : { - "properties" : { - "_tags" : { - "type" : "keyword" - }, - "comments" : { - "properties" : { - "comment" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "keyword" - }, - "entries" : { - "properties" : { - "entries" : { - "properties" : { - "field" : { - "type" : "keyword" - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "field" : { - "type" : "keyword" - }, - "list" : { - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - 
"value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "immutable" : { - "type" : "boolean" - }, - "item_id" : { - "type" : "keyword" - }, - "list_id" : { - "type" : "keyword" - }, - "list_type" : { - "type" : "keyword" - }, - "meta" : { - "type" : "keyword" - }, - "name" : { - "type" : "keyword" - }, - "os_types" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "tie_breaker_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "exception-list-agnostic" : { - "properties" : { - "_tags" : { - "type" : "keyword" - }, - "comments" : { - "properties" : { - "comment" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "keyword" - }, - "entries" : { - "properties" : { - "entries" : { - "properties" : { - "field" : { - "type" : "keyword" - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "field" : { - "type" : "keyword" - }, - "list" : { - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "immutable" : { - "type" : "boolean" - }, - "item_id" : { - "type" : "keyword" - }, - "list_id" : { - "type" : "keyword" - }, - "list_type" : { - "type" : "keyword" - }, - "meta" : { - "type" : "keyword" - }, - 
"name" : { - "type" : "keyword" - }, - "os_types" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "tie_breaker_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "file-upload-telemetry" : { - "properties" : { - "filesUploadedTotalCount" : { - "type" : "long" - } - } - }, - "fleet-agent-actions" : { - "properties" : { - "ack_data" : { - "type" : "text" - }, - "agent_id" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "date" - }, - "data" : { - "type" : "binary" - }, - "policy_id" : { - "type" : "keyword" - }, - "policy_revision" : { - "type" : "integer" - }, - "sent_at" : { - "type" : "date" - }, - "type" : { - "type" : "keyword" - } - } - }, - "fleet-agent-events" : { - "properties" : { - "action_id" : { - "type" : "keyword" - }, - "agent_id" : { - "type" : "keyword" - }, - "data" : { - "type" : "text" - }, - "message" : { - "type" : "text" - }, - "payload" : { - "type" : "text" - }, - "policy_id" : { - "type" : "keyword" - }, - "stream_id" : { - "type" : "keyword" - }, - "subtype" : { - "type" : "keyword" - }, - "timestamp" : { - "type" : "date" - }, - "type" : { - "type" : "keyword" - } - } - }, - "fleet-agents" : { - "properties" : { - "access_api_key_id" : { - "type" : "keyword" - }, - "active" : { - "type" : "boolean" - }, - "current_error_events" : { - "type" : "text", - "index" : false - }, - "default_api_key" : { - "type" : "binary" - }, - "default_api_key_id" : { - "type" : "keyword" - }, - "enrolled_at" : { - "type" : "date" - }, - "last_checkin" : { - "type" : "date" - }, - "last_checkin_status" : { - "type" : "keyword" - }, - "last_updated" : { - "type" : "date" - }, - "local_metadata" : { - "type" : "flattened" - }, - "packages" : { - "type" : "keyword" - }, - "policy_id" : { - "type" : "keyword" - }, - "policy_revision" : { - "type" : "integer" - }, - "shared_id" : { - "type" : "keyword" - }, - 
"type" : { - "type" : "keyword" - }, - "unenrolled_at" : { - "type" : "date" - }, - "unenrollment_started_at" : { - "type" : "date" - }, - "updated_at" : { - "type" : "date" - }, - "upgrade_started_at" : { - "type" : "date" - }, - "upgraded_at" : { - "type" : "date" - }, - "user_provided_metadata" : { - "type" : "flattened" - }, - "version" : { - "type" : "keyword" - } - } - }, - "fleet-enrollment-api-keys" : { - "properties" : { - "active" : { - "type" : "boolean" - }, - "api_key" : { - "type" : "binary" - }, - "api_key_id" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "date" - }, - "expire_at" : { - "type" : "date" - }, - "name" : { - "type" : "keyword" - }, - "policy_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - } - } - }, - "graph-workspace" : { - "properties" : { - "description" : { - "type" : "text" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text" - } - } - }, - "numLinks" : { - "type" : "integer" - }, - "numVertices" : { - "type" : "integer" - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - }, - "wsState" : { - "type" : "text" - } - } - }, - "index-pattern" : { - "dynamic" : "false", - "properties" : { - "title" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "infrastructure-ui-source" : { - "type" : "object", - "dynamic" : "false" - }, - "ingest-agent-policies" : { - "properties" : { - "description" : { - "type" : "text" - }, - "is_default" : { - "type" : "boolean" - }, - "monitoring_enabled" : { - "type" : "keyword", - "index" : false - }, - "name" : { - "type" : "keyword" - }, - "namespace" : { - "type" : "keyword" - }, - "package_policies" : { - "type" : "keyword" - }, - "revision" : { - "type" : "integer" - }, - "status" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "ingest-outputs" : { - 
"properties" : { - "ca_sha256" : { - "type" : "keyword", - "index" : false - }, - "config" : { - "type" : "flattened" - }, - "config_yaml" : { - "type" : "text" - }, - "fleet_enroll_password" : { - "type" : "binary" - }, - "fleet_enroll_username" : { - "type" : "binary" - }, - "hosts" : { - "type" : "keyword" - }, - "is_default" : { - "type" : "boolean" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "ingest-package-policies" : { - "properties" : { - "created_at" : { - "type" : "date" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "enabled" : { - "type" : "boolean" - }, - "inputs" : { - "type" : "nested", - "enabled" : false, - "properties" : { - "config" : { - "type" : "flattened" - }, - "enabled" : { - "type" : "boolean" - }, - "streams" : { - "type" : "nested", - "properties" : { - "compiled_stream" : { - "type" : "flattened" - }, - "config" : { - "type" : "flattened" - }, - "data_stream" : { - "properties" : { - "dataset" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "enabled" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - "vars" : { - "type" : "flattened" - } - } - }, - "type" : { - "type" : "keyword" - }, - "vars" : { - "type" : "flattened" - } - } - }, - "name" : { - "type" : "keyword" - }, - "namespace" : { - "type" : "keyword" - }, - "output_id" : { - "type" : "keyword" - }, - "package" : { - "properties" : { - "name" : { - "type" : "keyword" - }, - "title" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "policy_id" : { - "type" : "keyword" - }, - "revision" : { - "type" : "integer" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "ingest_manager_settings" : { - "properties" : { - "agent_auto_upgrade" : { - "type" : "keyword" - }, - "has_seen_add_data_notice" : { - "type" : "boolean", - "index" : false - }, - 
"kibana_ca_sha256" : { - "type" : "keyword" - }, - "kibana_urls" : { - "type" : "keyword" - }, - "package_auto_upgrade" : { - "type" : "keyword" - } - } - }, - "inventory-view" : { - "type" : "object", - "dynamic" : "false" - }, - "kql-telemetry" : { - "properties" : { - "optInCount" : { - "type" : "long" - }, - "optOutCount" : { - "type" : "long" - } - } - }, - "lens" : { - "properties" : { - "description" : { - "type" : "text" - }, - "expression" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "state" : { - "type" : "flattened" - }, - "title" : { - "type" : "text" - }, - "visualizationType" : { - "type" : "keyword" - } - } - }, - "lens-ui-telemetry" : { - "properties" : { - "count" : { - "type" : "integer" - }, - "date" : { - "type" : "date" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "map" : { - "properties" : { - "description" : { - "type" : "text" - }, - "layerListJSON" : { - "type" : "text" - }, - "mapStateJSON" : { - "type" : "text" - }, - "title" : { - "type" : "text" - }, - "uiStateJSON" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "maps-telemetry" : { - "type" : "object", - "enabled" : false - }, - "metrics-explorer-view" : { - "type" : "object", - "dynamic" : "false" - }, - "migrationVersion" : { - "dynamic" : "true", - "properties" : { - "config" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "space" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - }, - "ml-job" : { - "properties" : { - "datafeed_id" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "job_id" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "type" : { - "type" : "keyword" - } - } - }, - "ml-telemetry" : { - "properties" : { - "file_data_visualizer" : { - "properties" 
: { - "index_creation_count" : { - "type" : "long" - } - } - } - } - }, - "monitoring-telemetry" : { - "properties" : { - "reportedClusterUuids" : { - "type" : "keyword" - } - } - }, - "namespace" : { - "type" : "keyword" - }, - "namespaces" : { - "type" : "keyword" - }, - "originId" : { - "type" : "keyword" - }, - "query" : { - "properties" : { - "description" : { - "type" : "text" - }, - "filters" : { - "type" : "object", - "enabled" : false - }, - "query" : { - "properties" : { - "language" : { - "type" : "keyword" - }, - "query" : { - "type" : "keyword", - "index" : false - } - } - }, - "timefilter" : { - "type" : "object", - "enabled" : false - }, - "title" : { - "type" : "text" - } - } - }, - "references" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "sample-data-telemetry" : { - "properties" : { - "installCount" : { - "type" : "long" - }, - "unInstallCount" : { - "type" : "long" - } - } - }, - "search" : { - "properties" : { - "columns" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text", - "index" : false - } - } - }, - "sort" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "search-telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "siem-detection-engine-rule-actions" : { - "properties" : { - "actions" : { - "properties" : { - "action_type_id" : { - "type" : "keyword" - }, - "group" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alertThrottle" : { - "type" : "keyword" - }, - 
"ruleAlertId" : { - "type" : "keyword" - }, - "ruleThrottle" : { - "type" : "keyword" - } - } - }, - "siem-detection-engine-rule-status" : { - "properties" : { - "alertId" : { - "type" : "keyword" - }, - "bulkCreateTimeDurations" : { - "type" : "float" - }, - "gap" : { - "type" : "text" - }, - "lastFailureAt" : { - "type" : "date" - }, - "lastFailureMessage" : { - "type" : "text" - }, - "lastLookBackDate" : { - "type" : "date" - }, - "lastSuccessAt" : { - "type" : "date" - }, - "lastSuccessMessage" : { - "type" : "text" - }, - "searchAfterTimeDurations" : { - "type" : "float" - }, - "status" : { - "type" : "keyword" - }, - "statusDate" : { - "type" : "date" - } - } - }, - "siem-ui-timeline" : { - "properties" : { - "columns" : { - "properties" : { - "aggregatable" : { - "type" : "boolean" - }, - "category" : { - "type" : "keyword" - }, - "columnHeaderType" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "example" : { - "type" : "text" - }, - "id" : { - "type" : "keyword" - }, - "indexes" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "placeholder" : { - "type" : "text" - }, - "searchable" : { - "type" : "boolean" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "dataProviders" : { - "properties" : { - "and" : { - "properties" : { - "enabled" : { - "type" : "boolean" - }, - "excluded" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - "kqlQuery" : { - "type" : "text" - }, - "name" : { - "type" : "text" - }, - "queryMatch" : { - "properties" : { - "displayField" : { - "type" : "text" - }, - "displayValue" : { - "type" : "text" - }, - "field" : { - "type" : "text" - }, - "operator" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "type" : { - "type" : "text" - } - } - }, - "enabled" : { - "type" : "boolean" - }, - "excluded" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - 
"kqlQuery" : { - "type" : "text" - }, - "name" : { - "type" : "text" - }, - "queryMatch" : { - "properties" : { - "displayField" : { - "type" : "text" - }, - "displayValue" : { - "type" : "text" - }, - "field" : { - "type" : "text" - }, - "operator" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "type" : { - "type" : "text" - } - } - }, - "dateRange" : { - "properties" : { - "end" : { - "type" : "date" - }, - "start" : { - "type" : "date" - } - } - }, - "description" : { - "type" : "text" - }, - "eventType" : { - "type" : "keyword" - }, - "excludedRowRendererIds" : { - "type" : "text" - }, - "favorite" : { - "properties" : { - "favoriteDate" : { - "type" : "date" - }, - "fullName" : { - "type" : "text" - }, - "keySearch" : { - "type" : "text" - }, - "userName" : { - "type" : "text" - } - } - }, - "filters" : { - "properties" : { - "exists" : { - "type" : "text" - }, - "match_all" : { - "type" : "text" - }, - "meta" : { - "properties" : { - "alias" : { - "type" : "text" - }, - "controlledBy" : { - "type" : "text" - }, - "disabled" : { - "type" : "boolean" - }, - "field" : { - "type" : "text" - }, - "formattedValue" : { - "type" : "text" - }, - "index" : { - "type" : "keyword" - }, - "key" : { - "type" : "keyword" - }, - "negate" : { - "type" : "boolean" - }, - "params" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "text" - } - } - }, - "missing" : { - "type" : "text" - }, - "query" : { - "type" : "text" - }, - "range" : { - "type" : "text" - }, - "script" : { - "type" : "text" - } - } - }, - "indexNames" : { - "type" : "text" - }, - "kqlMode" : { - "type" : "keyword" - }, - "kqlQuery" : { - "properties" : { - "filterQuery" : { - "properties" : { - "kuery" : { - "properties" : { - "expression" : { - "type" : "text" - }, - "kind" : { - "type" : "keyword" - } - } - }, - "serializedQuery" : { - "type" : "text" - } - } - } - } - }, - "savedQueryId" : { - "type" : "keyword" - }, - "sort" : { - 
"properties" : { - "columnId" : { - "type" : "keyword" - }, - "sortDirection" : { - "type" : "keyword" - } - } - }, - "status" : { - "type" : "keyword" - }, - "templateTimelineId" : { - "type" : "text" - }, - "templateTimelineVersion" : { - "type" : "integer" - }, - "timelineType" : { - "type" : "keyword" - }, - "title" : { - "type" : "text" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "siem-ui-timeline-note" : { - "properties" : { - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "eventId" : { - "type" : "keyword" - }, - "note" : { - "type" : "text" - }, - "timelineId" : { - "type" : "keyword" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "siem-ui-timeline-pinned-event" : { - "properties" : { - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "eventId" : { - "type" : "keyword" - }, - "timelineId" : { - "type" : "keyword" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "space" : { - "properties" : { - "_reserved" : { - "type" : "boolean" - }, - "color" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "disabledFeatures" : { - "type" : "keyword" - }, - "imageUrl" : { - "type" : "text", - "index" : false - }, - "initials" : { - "type" : "keyword" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 2048 - } - } - } - } - }, - "tag" : { - "properties" : { - "color" : { - "type" : "text" - }, - "description" : { - "type" : "text" - }, - "name" : { - "type" : "text" - } - } - }, - "telemetry" : { - "properties" : { - "allowChangingOptInStatus" : { - "type" : "boolean" - }, - "enabled" : { - "type" : "boolean" - }, - "lastReported" : { - "type" : "date" - }, - "lastVersionChecked" : { - "type" : "keyword" - }, - "reportFailureCount" : { - "type" : "integer" - }, - "reportFailureVersion" 
: { - "type" : "keyword" - }, - "sendUsageFrom" : { - "type" : "keyword" - }, - "userHasSeenNotice" : { - "type" : "boolean" - } - } - }, - "timelion-sheet" : { - "properties" : { - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text" - } - } - }, - "timelion_chart_height" : { - "type" : "integer" - }, - "timelion_columns" : { - "type" : "integer" - }, - "timelion_interval" : { - "type" : "keyword" - }, - "timelion_other_interval" : { - "type" : "keyword" - }, - "timelion_rows" : { - "type" : "integer" - }, - "timelion_sheet" : { - "type" : "text" - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "todo" : { - "properties" : { - "icon" : { - "type" : "keyword" - }, - "task" : { - "type" : "text" - }, - "title" : { - "type" : "keyword" - } - } - }, - "tsvb-validation-telemetry" : { - "properties" : { - "failedRequests" : { - "type" : "long" - } - } - }, - "type" : { - "type" : "keyword" - }, - "ui-metric" : { - "properties" : { - "count" : { - "type" : "integer" - } - } - }, - "updated_at" : { - "type" : "date" - }, - "upgrade-assistant-reindex-operation" : { - "properties" : { - "errorMessage" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "indexName" : { - "type" : "keyword" - }, - "lastCompletedStep" : { - "type" : "long" - }, - "locked" : { - "type" : "date" - }, - "newIndexName" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "reindexOptions" : { - "properties" : { - "openAndClose" : { - "type" : "boolean" - }, - "queueSettings" : { - "properties" : { - "queuedAt" : { - "type" : "long" - }, - "startedAt" : { - "type" : "long" - } - } - } - } - }, - "reindexTaskId" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - 
}, - "reindexTaskPercComplete" : { - "type" : "float" - }, - "runningReindexCount" : { - "type" : "integer" - }, - "status" : { - "type" : "integer" - } - } - }, - "upgrade-assistant-telemetry" : { - "properties" : { - "features" : { - "properties" : { - "deprecation_logging" : { - "properties" : { - "enabled" : { - "type" : "boolean", - "null_value" : true - } - } - } - } - }, - "ui_open" : { - "properties" : { - "cluster" : { - "type" : "long", - "null_value" : 0 - }, - "indices" : { - "type" : "long", - "null_value" : 0 - }, - "overview" : { - "type" : "long", - "null_value" : 0 - } - } - }, - "ui_reindex" : { - "properties" : { - "close" : { - "type" : "long", - "null_value" : 0 - }, - "open" : { - "type" : "long", - "null_value" : 0 - }, - "start" : { - "type" : "long", - "null_value" : 0 - }, - "stop" : { - "type" : "long", - "null_value" : 0 - } - } - } - } - }, - "uptime-dynamic-settings" : { - "type" : "object", - "dynamic" : "false" - }, - "url" : { - "properties" : { - "accessCount" : { - "type" : "long" - }, - "accessDate" : { - "type" : "date" - }, - "createDate" : { - "type" : "date" - }, - "url" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 2048 - } - } - } - } - }, - "visualization" : { - "properties" : { - "description" : { - "type" : "text" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text", - "index" : false - } - } - }, - "savedSearchRefName" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { - "type" : "text" - }, - "uiStateJSON" : { - "type" : "text", - "index" : false - }, - "version" : { - "type" : "integer" - }, - "visState" : { - "type" : "text", - "index" : false - } - } - }, - "workplace_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - } - } - }` diff --git a/dev-tools/integration/wait-for-elasticsearch.sh b/dev-tools/integration/wait-for-elasticsearch.sh index ec9d251f7..509f1cad7 100755 
--- a/dev-tools/integration/wait-for-elasticsearch.sh +++ b/dev-tools/integration/wait-for-elasticsearch.sh @@ -30,9 +30,9 @@ until [ "$health" = 'green' ]; do health="$(curl -fsSL "$host/_cat/health?h=status")" echo $health health="$(echo "$health" | tr -d '[:space:]')" - >&2 echo "Elastic Search is unavailable - sleeping" + >&2 echo "Elasticsearch is unavailable - sleeping" sleep 1 done ->&2 echo "Elastic Search is up" +>&2 echo "Elasticsearch is up" exec $cmd \ No newline at end of file diff --git a/dev-tools/rpm/nfpm.yaml b/dev-tools/rpm/nfpm.yaml deleted file mode 100644 index dc9fb62eb..000000000 --- a/dev-tools/rpm/nfpm.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# nfpm example config file -# env APP_RELEASE=11.1 APP_VERSION=1.2.3 ../nfpm/nfpm -f ./build/rpm/nfpm.yaml pkg --target evmon.rpm -name: "fleet" -arch: "x86_64" -platform: "linux" -version: "v${APP_VERSION}" -release: "${APP_RELEASE}" -section: "default" -maintainer: "devops@endgame.com" -description: Elastic Fleet -vendor: "Elastic NV" -homepage: "http://www.elastic.co/" -contents: - - src: ./bin/fleet - dst: /usr/bin/fleet - - - src: ./systemd/fleet.service - dst: /usr/lib/systemd/system/fleet.service - type: config - - - src: ./fleet-server.yml - dst: /usr/share/fleet/fleet-server.yml - type: config - diff --git a/example/fleet-server-100.yml b/example/fleet-server-100.yml new file mode 100644 index 000000000..f6a1ad708 --- /dev/null +++ b/example/fleet-server-100.yml @@ -0,0 +1,50 @@ +# This sample configuration file demonstrates tweaks to limit the resource usage +# of a very small (100 agent) installation. Target is 1 CPU, 50MiB RAM. + +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' + +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 # Normally provided by the agent; stubbed here. 
+ +inputs: + - cache: + num_counters: 2000 # Limit the size of the hash table to rougly 10x expected number of elements + max_cost: 2097152 # Limit the total size of data allowed in the cache, 2 MiB in bytes. + server: + limits: + policy_throttle: 200ms # Roll out a new policy every 200ms; roughly 5 per second. + max_connections: 200 # Hard limit on the number of connections accepted; defends TLS connection flood. + checkin_limit: + interval: 50ms # Check in no faster than 20 per second. + burst: 25 # Allow burst up to 25, then fall back to interval rate. + max: 100 # No more than 100 long polls allowed. THIS EFFECTIVELY LIMITS MAX ENDPOINTS. + artifact_limit: + interval: 100ms # Roll out 10 artifacts per second + burst: 10 # Small burst prevents outbound buffer explosion. + max: 10 # Only 10 transactions at a time max. This should generally not be a relavent limitation as the transactions are cached. + ack_limit: + interval: 10ms # Allow ACK only 100 per second. ACK payload is unbounded in RAM so need to limit. + burst: 20 # Allow burst up to 20, then fall back to interrval rate. + max: 20 # Cannot have too many processing at once due to unbounded payload size. + enroll_limit: + interval: 100ms # Enroll is both CPU and RAM intensive. Limit to 10 per second. + burst: 5 # Allow intial burst, but limit to max. + max: 10 # Max limit. + ssl: + enabled: true + key: /path/to/key.pem # To support TLS, server needs cert, key pair + certificate: /path/to/cert.pem + bulk: + flush_max_pending: 8 # Limit the number of pending ES bulk operations + flush_interval: 100ms # Flush ES bulk queues on this interval. 
+ runtime: + gc_percent: 20 # Force the GC to execute more frequently: see https://golang.org/pkg/runtime/debug/#SetGCPercent + + +http: + enabled: true # Enable metrics on http://localhost:5066/stats \ No newline at end of file diff --git a/fleet-server.yml b/fleet-server.yml index bd1d94944..44bc1c368 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -3,9 +3,59 @@ output: hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' username: '${ELASTICSEARCH_USERNAME:elastic}' password: '${ELASTICSEARCH_PASSWORD:changeme}' + #service_token: 'token' # comment out username/password when this is set fleet: agent: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 logging: - level: '${LOG_LEVEL:INFO}' + level: '${LOG_LEVEL:DEBUG}' + +# Input config provided by the Elastic Agent for the server +#inputs: +# - type: fleet-server +# server: +# host: localhost +# port: 8220 +# timeouts: +# checkin_long_poll: 300s # long poll timeout +# profiler: +# enabled: true # enable profiler +# limits: +# policy_throttle: 100ms +# max_connetions: 150 +# checkin_limit: +# interval: 100ms +# burst: 25 +# max: 100 +# artifact_limit: +# interval: 10ms +# burst: 5 +# max: 10 +# ack_limit: +# interval: 10ms +# burst: 20 +# max: 10 +# enroll_limit: +# interval: 50ms +# burst: 10 +# max: 8 +# ssl: +# enabled: true +# certificate: /creds/cert.pem +# key: /creds/key.pem +# cache: +# num_counters: 500000 # 10x times expected count +# max_cost: 50 * 1024 * 1024 # 50MiB cache size + +logging: + to_stderr: true # Force the logging output to stderr + #level: trace + +# Enables the stats endpoint under http://localhost:5601 by default. 
+# Additional stats can be found under http://127.0.0.1:5066/stats and http://127.0.0.1:5066/state +http.enabled: true +#http.host: http://127.0.0.1 +#http.port: 5601 +#http.named_pipe.user: +#http.named_pipe.security_descriptor: \ No newline at end of file diff --git a/go.mod b/go.mod index d570bf3d6..8697744ef 100644 --- a/go.mod +++ b/go.mod @@ -1,24 +1,29 @@ module github.com/elastic/fleet-server/v7 -go 1.14 +go 1.16 require ( - github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f - github.com/dgraph-io/ristretto v0.0.3 - github.com/elastic/beats/v7 v7.10.0 - github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 + github.com/Pallinder/go-randomdata v1.2.0 + github.com/dgraph-io/ristretto v0.1.0 + github.com/elastic/beats/v7 v7.11.1 + github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 + github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7 github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible github.com/google/go-cmp v0.4.0 github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-version v1.3.0 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 - github.com/mitchellh/mapstructure v1.3.3 + github.com/mailru/easyjson v0.7.7 + github.com/miolini/datacounter v1.0.2 + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 + github.com/pkg/errors v0.9.1 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.19.0 github.com/spf13/cobra v0.0.5 github.com/stretchr/testify v1.6.1 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 + go.uber.org/zap v1.14.0 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ) diff --git a/go.sum b/go.sum index 2275a17e8..583e177b2 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ +4d63.com/embedfiles v0.0.0-20190311033909-995e0740726f 
h1:oyYjGRBNq1TxAIG8aHqtxlvqUfzdZf+MbcRb/oweNfY= 4d63.com/embedfiles v0.0.0-20190311033909-995e0740726f/go.mod h1:HxEsUxoVZyRxsZML/S6e2xAuieFMlGO0756ncWx1aXE= +4d63.com/tz v1.1.1-0.20191124060701-6d37baae851b h1:+TO4EgK74+Qo/ilRDiF2WpY09Jk9VSJSLe3wEn+dJBw= 4d63.com/tz v1.1.1-0.20191124060701-6d37baae851b/go.mod h1:SHGqVdL7hd2ZaX2T9uEiOZ/OFAUfCCLURdLPJsd8ZNs= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -7,158 +9,270 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= 
+code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible/go.mod h1:KPBTRqj+y738Nhf1+g4JHFaBU8j7dedirR5ETNHvMXU= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a h1:8rqv2w8xEceNwckcF5ONeRt0qBHlh5bnNfFnYTrZbxs= code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= +github.com/Azure/azure-event-hubs-go/v3 v3.1.2 h1:S/NjCZ1Z2R4rHJd2Hbbad6rIhxJ4lZZebKTsKHweX4A= github.com/Azure/azure-event-hubs-go/v3 v3.1.2/go.mod h1:hR40byNJjKkS74+3RhloPQ8sJ8zFQeJ920Uk3oYY0+k= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go 
v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.9.4/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli 
v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 
h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Pallinder/go-randomdata v1.2.0 h1:DZ41wBchNRb/0GfsePLiSwb0PHZmT67XY00lCDlaYPg= +github.com/Pallinder/go-randomdata v1.2.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqCQQixsA8HyRZfV9Y= +github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod 
h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6 h1:2Gl9Tray0NEjP9KC0FjdGWlszbmTIsBP3JYzgyFdL4E= github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc h1:9iW/Fbn/R/nyUOiqo6AgwBe8uirqUIoTGF3vKG8qjoc= github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= +github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f h1:wr9LrxkE1Ai416C/mis1gEDsXrbERHGufCmf7xuYwI4= -github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f/go.mod h1:lvlu2Ij1bLmxB8RUWyw5IQ4/JcLX60eYhLiBmvImnhk= +github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI= github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= +github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 
h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43/go.mod h1:tJPYQG4mnMeUtQvQKNkbsFrnmZOg59Qnf8CcctFv5v4= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d h1:OE3kzLBpy7pOJEzE55j9sdgrSilUPzzj++FWvp1cmIs= github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-lambda-go v1.6.0 h1:T+u/g79zPKw1oJM7xYhvpq7i4Sjc0iVsXZUaqRVVSOg= github.com/aws/aws-lambda-go v1.6.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= +github.com/aws/aws-sdk-go-v2 v0.9.0 h1:dWtJKGRFv3UZkMBQaIzMsF0/y4ge3iQPWTzeC4r/vl4= github.com/aws/aws-sdk-go-v2 v0.9.0/go.mod h1:sa1GePZ/LfBGI4dSq30f6uR4Tthll8axxtEPvlpXZ8U= +github.com/awslabs/goformation/v3 v3.1.0 h1:1WhWJrMtuwphJ+x1+0wM7v4QPDzcArvX+i4/sK1Z4e4= 
github.com/awslabs/goformation/v3 v3.1.0/go.mod h1:hQ5RXo3GNm2laHWKizDzU5DsDy+yNcenSca2UxN0850= +github.com/awslabs/goformation/v4 v4.1.0 h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56ubpywGU= github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/bradleyfalzon/ghinstallation v1.1.0 h1:mwazVinJU0mPyLxIcdtJzu4DhWXFO5lMsWhKyFRIwFk= github.com/bradleyfalzon/ghinstallation v1.1.0/go.mod h1:p7iD8KytOOKg2wCqbwvJlq4JGpYMjwjkiqdyUqOIHLI= +github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible h1:4g18+HnTDwEtO0n7K8B1Kjq+04MEKJRkhJNQ/hb9d5A= github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5zPtfk+aa2gw255FIIoi93zMmuy19o0bc= github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= +github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= 
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f h1:fK3ikA1s77arBhpDwFuyO0hUZ2Aa8O6o2Uzy8Q6iLbs= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f/go.mod h1:RtIewdO+K/czvxvIFCMbPyx7jdxSLL1RZ+DA/Vk8Lwg= +github.com/cloudfoundry/noaa v2.1.0+incompatible h1:hr6VnM5VlYRN3YD+NmAedQLW8686sUMknOSe0mFS2vo= github.com/cloudfoundry/noaa v2.1.0+incompatible/go.mod h1:5LmacnptvxzrTvMfL9+EJhgkUfIgcwI61BVSTh47ECo= +github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI= github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod 
h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk= github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= 
+github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e h1:LzwWXEScfcTu7vUZNlDDWDARoSGEtvlDKK2BYHowNeE= github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2 h1:6+hM8KeYKV0Z9EIINNqIEDyyIRAcNc2FW+/TUYNmWyw= github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b h1:mUDs72Rlzv6A4YN8w3Ra3hU9x/plOQPcQjZYL/1f5SM= github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= 
github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= -github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible h1:4jGdduO4ceTJFKf0IhgaB8NJapGqKHwC2b4xQ/cXujM= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1 h1:eG5K5GNAAHvQlFmfIuy0Ocjg5dvyX22g/KknwTpmBko= github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf h1:uOWCk+L8abzw0BzmnCn7j7VT3g6bv9zW8fkR0yOP0Q4= github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a h1:pNE/kl/UUSqAi7IiyPjnaIbYBRaEORJY8/RCK9Tx39c= github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a/go.mod 
h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 h1:9Hsno4vmXpQ0yVAp07bLxS5dHH24w80xzmUCLil47ME= github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnjXhL/t10BSP1mcr/0Ldea2uMyuBr2SWk= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy 
v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2 h1:DW6WrARxK5J+o8uAKCiACi5wy9EK1UzrsCpGBPsKHAA= github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/elastic/beats/v7 v7.10.0 h1:MpXREz0PzwuHpJnNAHcjmRoQRfVUnJFJvYQdzRjBZKg= -github.com/elastic/beats/v7 v7.10.0/go.mod h1:GV6Gy80eRYpJ4Dk4MZcQFMxXbmOnWrj9ZPK5UhwCkhU= +github.com/elastic/beats/v7 v7.11.1 h1:eYJRKc/mA6rhQNujUV9lUADQ0S9SZvI5d782BnNvgFY= +github.com/elastic/beats/v7 v7.11.1/go.mod h1:2gJ+JvWjTYuMA37chVSfsolz7Z2ca+gL39HpmSLO+z8= +github.com/elastic/ecs v1.6.0 h1:8NmgfnsjmKXh9hVsK3H2tZtfUptepNc3msJOAynhtmc= github.com/elastic/ecs v1.6.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= +github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= +github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= github.com/elastic/go-concert v0.0.4/go.mod 
h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 h1:0ZrGQPGY7QCySD/14ht2UDggGKmqgLouMd5FFimcguA= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= -github.com/elastic/go-libaudit/v2 v2.0.2/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= +github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7 h1:Nq382VeELkUSC7y8JIXBNj0YfOqmq/d8mX+crl4xdrM= +github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-libaudit/v2 v2.1.0 h1:yWSKoGaoWLGFPjqWrQ4gwtuM77pTk7K4CsPxXss8he4= +github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= +github.com/elastic/go-lookslike v0.3.0 h1:HDI/DQ65V85ZqM7D/sbxcK2wFFnh3+7iFvBk2v2FTHs= github.com/elastic/go-lookslike v0.3.0/go.mod h1:AhH+rdJux5RlVjs+6ej4jkvYyoNRkj2crxmqeHlj3hA= +github.com/elastic/go-lumber v0.1.0 h1:HUjpyg36v2HoKtXlEC53EJ3zDFiDRn65d7B8dBHNius= github.com/elastic/go-lumber v0.1.0/go.mod h1:8YvjMIRYypWuPvpxx7WoijBYdbB7XIh/9FqSYQZTtxQ= +github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3fqHRknTBXBtegjriHFrB42YKgXGI= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= +github.com/elastic/go-seccomp-bpf v1.1.0 h1:jUzzDc6LyCtdolZdvL/26dad6rZ9vsc7xZ2eadKECAU= github.com/elastic/go-seccomp-bpf v1.1.0/go.mod h1:l+89Vy5BzjVcaX8USZRMOwmwwDScE+vxCFzzvQwN7T8= +github.com/elastic/go-structform v0.0.7 h1:ihszOJQryNuIIHE2ZgsbiDq+agKO6V4yK0JYAI3tjzc= github.com/elastic/go-structform v0.0.7/go.mod 
h1:QrMyP3oM9Sjk92EVGLgRaL2lKt0Qx7ZNDRWDxB6khVs= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-txfile v0.0.7 h1:Yn28gclW7X0Qy09nSMSsx0uOAvAGMsp6XHydbiLVe2s= github.com/elastic/go-txfile v0.0.7/go.mod h1:H0nCoFae0a4ga57apgxFsgmRjevNCsEaT6g56JoeKAE= github.com/elastic/go-ucfg v0.7.0/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo= github.com/elastic/go-ucfg v0.8.3 h1:leywnFjzr2QneZZWhE6uWd+QN/UpP0sdJRHYyuFvkeo= @@ -166,59 +280,95 @@ github.com/elastic/go-ucfg v0.8.3/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+F github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/elastic/gosigar v0.10.6-0.20200715000138-f115143bb233/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= +github.com/elastic/gosigar v0.13.0 h1:EIeuQcLPKia759s6mlVztlxUyKiKYHo6y6kOODOLO7A= +github.com/elastic/gosigar v0.13.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec h1:rAHd7DeHIHjSzvnkl197GKh9TCWGKg/z2BBbbGOEiWI= github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec/go.mod h1:X690XXMxlbtN8c7xcpsENKNlbj8VClCZ2hwSOhSyNmE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful 
v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.5.0 h1:vBh+kQp8lg9XPr56u1CPrWjFXtdphMoGWVHr9/1c+A0= github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24 h1:nREVDi4H8mwnNqfxFU9NMzZrDCg8TXbEatMvHozxKwU= 
github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647 h1:whypLownH338a3Ork2w9t0KUKtVxbXYySuz7V1YGsJo= github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 
h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= +github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be h1:zXHeEEJ231bTf/IXqvCfeaqjLpXsq42ybLoT4ROSR6Y= github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq/cUjA02lMZhijk5kR31SEydKyqah1OgBuo= 
+github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godror/godror v0.10.4 h1:44FcfzDPp/PJZzen5Hm59SZQBhgrbR6E1KwCjg6gnJo= github.com/godror/godror v0.10.4/go.mod h1:9MVLtu25FBJBMHkPs0m3Ngf/VmwGcLpM2HS8PlNGw9U= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b h1:3QNh5Xo2pmr2nZXENtnztfpjej8XY8EPmvYxF5SzY9M= github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache 
v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -228,166 +378,258 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree 
v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9 h1:b4EyQBj8pgtcWOr7YCSxK6NUQzJr0n4hxJ3mc+dtKk4= github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github/v28 v28.1.1 h1:kORf5ekX5qwXO2mGzXXOjMe/g6ap8ahVe0sBEulhSxo= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts= github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34 h1:/wV+gZsAEt7vP+fJkT1AltOejfLS3uonB4RTOdXWjVk= github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34/go.mod 
h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 h1:OggOMmdI0JLwg1FkOKH9S7fVHF0oEm8PX6S8kAdpOps= github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic 
v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1-0.20190624222214-25d8b0b66985/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a h1:yNuTIQkXLNAevCwQJ7ur3ZPoZPhbvAi6QXhJ/ylX6+8= github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.13.0 h1:sBDQoHXrOlfPobnKw69FIKa1wg9qsLLvvQ/Y19WtFgI= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= +github.com/h2non/filetype 
v1.1.1-0.20201130172452-f60988ab73d5 h1:xI88renBpIJws9OfEQq4Dng10OppnY5u9bTok/GDFEI= +github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d h1:Ft6PtvobE9vwkCsuoNO5DZDbhKkKuktAlSsiOi1X5NA= github.com/hashicorp/golang-lru 
v0.5.2-0.20190520140433-59383c442f7d/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01 h1:HiJF8Mek+I7PY0Bm+SuhkwaAZSZP83sw6rrTMrgZ0io= github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01/go.mod h1:1DWDZmeYf0LX30zscWb7K9rUMeirNeBMd5Dum+seUhc= +github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1 h1:biVg9rs1Vl8LAwrkjlssTaEn2csIl3LKoQVEJrWGmJ8= github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1/go.mod h1:RsN5RGgVYeXpcXNtWyztD5VIe7VNSEqpJvF2iEH7QvI= +github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5 h1:ylgozezbuxA/i4uFtWCG/qGKYOZydsS8VUNNwfugn2Q= github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5/go.mod h1:FGO/dXIFZnan7KvvUSFk1hYMnoVNzB6NTMPrmke8SSI= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 
h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2 h1:uJiWD+lXJ+WJ9kldTB6F4T4V+oGIhd0I1ktTXk3P6Ks= github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2/go.mod h1:CfMdguCK66I5DAUJgGKyNz8aB6vO5dZzkm9Xep6WGvw= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josephspurrier/goversioninfo v0.0.0-20200309025242-14b0ab84c6ca/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= 
+github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd h1:KikNiFwUO3QLyeKyN4k9yBH9Pcu/gU/yficWi61cJIw= +github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod 
h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/justinas/nosurf v1.1.0 h1:qqV6FJmnDBJ6F9pOzhZgZitAZWBYonMOXglof7TtdZw= github.com/justinas/nosurf v1.1.0/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ= +github.com/kardianos/service v1.1.0 h1:QV2SiEeWK42P0aEmGcsAgjApw/lRxkwopvT+Gu6t1/0= github.com/kardianos/service v1.1.0/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= github.com/karrick/godirwalk v1.15.6 h1:Yf2mmR8TJy+8Fa0SuQVto5SYap6IF7lNVX4Jdl8G1qA= github.com/karrick/godirwalk v1.15.6/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1 
h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01 h1:EPw7R3OAyxHBCyl0oqh3lUZqS5lu3KSxzzGasE0opXQ= github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= +github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/pkger v0.17.0 h1:RFfyBPufP2V6cddUyyEVSHBpaAnM1WzaMNyqomeT+iY= github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render 
v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= +github.com/mattn/go-colorable v0.0.8 h1:KatiXbcoFpoKmM5pL0yhug+tx/POfZO+0aVsuGhUhgo= github.com/mattn/go-colorable v0.0.8/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.2 h1:F+DnWktyadxnOrohKLNUC9/GjFii5RJgY4GFG6ilggw= github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWePHKOs= github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miolini/datacounter v1.0.2 h1:mGTL0vqEAtH7mwNJS1JIpd6jwTAP6cBQQ2P8apaCIm8= +github.com/miolini/datacounter v1.0.2/go.mod h1:C45dc2hBumHjDpEU64IqPwR6TDyPVpzOqqRTN7zmBUA= +github.com/mitchellh/go-homedir v1.1.0 
h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= +github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 h1:qdHlMllk/PTLUrX3XdtXDrLL1lPSfcqUmJD1eYfbapg= github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= 
+github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.2.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483 h1:eFd3FsB01m/zNg/yBMYdm/XqiqCztcN9SVRPtGtzDHo= github.com/opencontainers/go-digest 
v1.0.0-rc1.0.20190228220655-ac19fd6e7483/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.4.1+incompatible 
h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0 h1:i5VIxp6QB8oWZ8IkK8zrDgeT6ORGIUeiN+61iETwJbI= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -396,16 +638,20 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg= github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc h1:6B8wpniGN4FtqzqWhe2OBOGkeZFbhwZpCh+V/pv/oik= github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc/go.mod h1:ikMPikHu8SMvBGWoKulvvOOZN227amf2E9eMYqyAwAY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod 
h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -413,99 +659,147 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/rakyll/statik v0.1.6 h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd 
h1:fvaEkjpr2NJbtnFRCft7D6y/mQ5/2OQU0pKJLW8dwFA= github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd/go.mod h1:giYAXnpegRDPsXUO7TRpDKXJo1lFGYxyWRfEt5iQ+OA= +github.com/reviewdog/reviewdog v0.9.17 h1:MKb3rlQZgkEXr3d85iqtYNITXn7gDJr2kT0IhgX/X9A= github.com/reviewdog/reviewdog v0.9.17/go.mod h1:Y0yPFDTi9L5ohkoecJdgbvAhq+dUXp+zI7atqVibwKg= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.19.0 h1:hYz4ZVdUgjXTBUmrkrw55j1nHx68LfOKIQk5IYtyScg= github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e h1:hUGyBE/4CXRPThr4b6kt+f1CN90no4Fs5CNrYOKYSIg= github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I= +github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54 h1:jbchLJWyhKcmOjkbC4zDvT/n5EEd7g6hnnF760rEyRA= github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= +github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA= github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY= github.com/sanathkr/yaml 
v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522 h1:39BJIaZIhIBmXATIhdlTBlTQpAiGXHnz17CrO7vF2Ss= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= github.com/shirou/gopsutil v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= 
+github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= 
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b h1:X/8hkb4rQq3+QuOxpJK7gWmAXmZucF0EI1s1BfBLq6U= github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b/go.mod h1:jAqhj/JBVC1PwcLTWd6rjQyGyItxxrhpiBl8LSuAGmw= +github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786 h1:B/IVHYiI0d04dudYw+CvCAGqSMq8d0yWy56eD6p85BQ= github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786/go.mod h1:RIkfovP3Y7my19aXEjjbNd9E5TlHozzAyt7B8AaEcwg= +github.com/ugorji/go v1.1.8 h1:/D9x7IRpfMHDlizVOgxrag5Fh+/NY+LtI8bsr+AswRA= github.com/ugorji/go v1.1.8/go.mod h1:0lNM99SwWUIRhCXnigEMClngXBk/EmpTXa7mgiewYWA= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.8 
h1:4dryPvxMP9OtkjIbuNeK2nb27M38XMHLGlfNSNph/5s= github.com/ugorji/go/codec v1.1.8/go.mod h1:X00B19HDtwvKbQY2DcYjvZxKQp8mzrJoQ6EgoIY/D2E= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs= github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e h1:NiofbjIUI5gR+ybDsGSVH1fWyjSeDYiYVJHT1+kcsak= github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e/go.mod h1:6GfHrdWBQYjFRIznu7XuQH4lYB2w8nO4bnImVKkzPOM= +github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230 h1:Ft1EJ6JL0F/RV6o2qJ1Be+wYxjYUSfRA3srfHgSgojc= github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230/go.mod h1:DFxTNgS/ExCGmmjVjSOgS2WjtfjKXgCyDzAFgbtovSA= +github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4 h1:hhA8EBThzz9PztawVTycKvfETVuBqxAQ5keFlAVtbAw= github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4/go.mod h1:RspW+E2Yb7Fs7HclB2tiDaiu6Rp41BiIG4Wo1YaoXGc= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec h1:HkZIDJrMKZHPsYhmH2XjTTSk1pbMCFfpxSnyzZUFm+k= github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= +github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 h1:NeNpIvfvaFOh0BH7nMEljE5Rk/VJlxhm58M41SeOD20= github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU= github.com/xanzy/go-gitlab v0.22.3/go.mod h1:t4Bmvnxj7k37S4Y17lfLx+nLqkf/oQwT2HagfWKv5Og= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= 
github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56 h1:yhqBHs09SmmUoNOHc9jgK4a60T3XFRtPAkYxVnqgY50= github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 h1:0gYLpmzecnaDCoeWxSfEJ7J1b6B/67+NV++4HKQXx+Y= github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= +go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b h1:Sf+V3eV91ZuXjF3824SABFgXU+z4ZEuIX5ikDvt2lCE= go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= +go.elastic.co/apm/module/apmelasticsearch v1.7.2 h1:5STGHLZLSeAzxordMc+dFVKiyVtMmxADOV+TgRaXXJg= 
go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8= +go.elastic.co/apm/module/apmhttp v1.7.2 h1:2mRh7SwBuEVLmJlX+hsMdcSg9xaielCLElaPn/+i34w= go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY= go.elastic.co/ecszap v0.3.0 h1:Zo/Y4sJLqbWDlqCHI4F4Lzeg0Fs4+n5ldVis4h9xV8w= go.elastic.co/ecszap v0.3.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.elastic.co/go-licence-detector v0.4.0 h1:it5dP+6LPxLsosdhtbAqk/zJQxzS0QSSpdNkKVuwKMs= go.elastic.co/go-licence-detector v0.4.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -515,8 +809,6 @@ go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= 
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -524,16 +816,16 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp 
v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -546,6 +838,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -553,7 +846,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= 
-golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -572,13 +864,15 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod 
h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -586,25 +880,24 @@ golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys 
v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -619,7 +912,7 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod 
h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -629,12 +922,15 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -679,6 +975,7 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -686,6 +983,7 @@ google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -697,6 +995,8 @@ google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -704,22 +1004,31 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod 
h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= @@ -731,10 +1040,11 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlI gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod 
h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -744,6 +1054,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -753,17 +1064,25 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod 
h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= +k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils 
v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/pkg/action/dispatcher.go b/internal/pkg/action/dispatcher.go index 10bd53a31..756f31db1 100644 --- a/internal/pkg/action/dispatcher.go +++ b/internal/pkg/action/dispatcher.go @@ -9,15 +9,17 @@ import ( "sync" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/rs/zerolog/log" ) type Sub struct { agentId string - seqNo int64 + seqNo sqn.SeqNo ch chan []model.Action } @@ -50,7 +52,7 @@ func (d *Dispatcher) Run(ctx context.Context) (err error) { } } -func (d *Dispatcher) Subscribe(agentId string, seqNo int64) *Sub { +func (d *Dispatcher) Subscribe(agentId string, seqNo sqn.SeqNo) *Sub { cbCh := make(chan []model.Action, 1) sub := Sub{ @@ -64,7 +66,7 @@ func (d *Dispatcher) Subscribe(agentId string, seqNo int64) 
*Sub { sz := len(d.subs) d.mx.Unlock() - log.Debug().Str("agentId", agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") + log.Trace().Str(logger.AgentId, agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") return &sub } @@ -79,7 +81,7 @@ func (d *Dispatcher) Unsubscribe(sub *Sub) { sz := len(d.subs) d.mx.Unlock() - log.Debug().Str("agentId", sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") + log.Trace().Str(logger.AgentId, sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") } func (d *Dispatcher) process(ctx context.Context, hits []es.HitT) { @@ -118,7 +120,7 @@ func (d *Dispatcher) getSub(agentId string) (Sub, bool) { func (d *Dispatcher) dispatch(ctx context.Context, agentId string, acdocs []model.Action) { sub, ok := d.getSub(agentId) if !ok { - log.Debug().Str("agent_id", agentId).Msg("Agent is not currently connected. Not dispatching actions.") + log.Debug().Str(logger.AgentId, agentId).Msg("Agent is not currently connected. Not dispatching actions.") return } select { diff --git a/internal/pkg/apikey/apikey.go b/internal/pkg/apikey/apikey.go index afc02564b..9230cabea 100644 --- a/internal/pkg/apikey/apikey.go +++ b/internal/pkg/apikey/apikey.go @@ -22,6 +22,7 @@ var ( ErrMalformedHeader = errors.New("malformed authorization header") ErrMalformedToken = errors.New("malformed token") ErrInvalidToken = errors.New("token not valid utf8") + ErrApiKeyNotFound = errors.New("api key not found") ) var AuthKey = http.CanonicalHeaderKey("Authorization") diff --git a/internal/pkg/apikey/apikey_integration_test.go b/internal/pkg/apikey/apikey_integration_test.go new file mode 100644 index 000000000..0bb6da14e --- /dev/null +++ b/internal/pkg/apikey/apikey_integration_test.go @@ -0,0 +1,87 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build integration +// +build integration + +package apikey + +import ( + "context" + "errors" + "testing" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/gofrs/uuid" + "github.com/google/go-cmp/cmp" +) + +const testFleetRoles = ` +{ + "fleet-apikey-access": { + "cluster": [], + "applications": [{ + "application": ".fleet", + "privileges": ["no-privileges"], + "resources": ["*"] + }] + } +} +` + +func TestCreateApiKeyWithMetadata(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + cfg := elasticsearch.Config{ + Username: "elastic", + Password: "changeme", + } + + es, err := elasticsearch.NewClient(cfg) + if err != nil { + t.Fatal(err) + } + + // Create the key + agentId := uuid.Must(uuid.NewV4()).String() + name := uuid.Must(uuid.NewV4()).String() + akey, err := Create(ctx, es, name, "", "true", []byte(testFleetRoles), + NewMetadata(agentId, TypeAccess)) + if err != nil { + t.Fatal(err) + } + + // Get the key and verify that metadata was saved correctly + aKeyMeta, err := Read(ctx, es, akey.Id) + if err != nil { + t.Fatal(err) + } + + diff := cmp.Diff(ManagedByFleetServer, aKeyMeta.Metadata.ManagedBy) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(true, aKeyMeta.Metadata.Managed) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(agentId, aKeyMeta.Metadata.AgentId) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(TypeAccess.String(), aKeyMeta.Metadata.Type) + if diff != "" { + t.Error(diff) + } + + // Try to get the key that doesn't exists, expect ErrApiKeyNotFound + aKeyMeta, err = Read(ctx, es, "0000000000000") + if !errors.Is(err, ErrApiKeyNotFound) { + t.Errorf("Unexpected error type: %v", err) + } +} diff --git a/internal/pkg/apikey/apikey_test.go b/internal/pkg/apikey/apikey_test.go new file mode 100644 index 000000000..d9a2dba1b --- /dev/null +++ 
b/internal/pkg/apikey/apikey_test.go @@ -0,0 +1,24 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !integration +// +build !integration + +package apikey + +import ( + "encoding/base64" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestMonitorLeadership(t *testing.T) { + rawToken := " foo:bar" + token := base64.StdEncoding.EncodeToString([]byte(rawToken)) + apiKey, err := NewApiKeyFromToken(token) + assert.NoError(t, err) + assert.Equal(t, *apiKey, ApiKey{" foo", "bar"}) + assert.Equal(t, token, apiKey.Token()) +} diff --git a/internal/pkg/apikey/auth.go b/internal/pkg/apikey/auth.go index e097bf185..6306320e3 100644 --- a/internal/pkg/apikey/auth.go +++ b/internal/pkg/apikey/auth.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) type SecurityInfo struct { @@ -24,13 +24,9 @@ type SecurityInfo struct { LookupRealm map[string]string `json:"lookup_realm"` } -// Kibana: -// https://github.com/elastic/kibana/blob/master/x-pack/plugins/security/server/authentication/authenticator.ts#L308 -// NOTE: Bulk request currently not available. +// Note: Prefer the bulk wrapper on this API func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*SecurityInfo, error) { - // TODO: Escape request for safety. Don't depend on ES. 
- token := fmt.Sprintf("%s%s", authPrefix, k.Token()) req := esapi.SecurityAuthenticateRequest{ @@ -40,7 +36,7 @@ func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*Se res, err := req.Do(ctx, es) if err != nil { - return nil, err + return nil, fmt.Errorf("apikey auth request %s: %w", k.Id, err) } if res.Body != nil { @@ -48,13 +44,13 @@ func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*Se } if res.IsError() { - return nil, fmt.Errorf("Fail Auth: %s", res.String()) + return nil, fmt.Errorf("apikey auth response %s: %s", k.Id, res.String()) } var info SecurityInfo decoder := json.NewDecoder(res.Body) if err := decoder.Decode(&info); err != nil { - return nil, fmt.Errorf("Auth: error parsing response body: %s", err) // TODO: Wrap error + return nil, fmt.Errorf("apikey auth parse %s: %w", k.Id, err) } return &info, nil diff --git a/internal/pkg/apikey/create.go b/internal/pkg/apikey/create.go index dc244871d..0371934fe 100644 --- a/internal/pkg/apikey/create.go +++ b/internal/pkg/apikey/create.go @@ -10,20 +10,21 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) -func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, roles []byte) (*ApiKey, error) { - +func Create(ctx context.Context, client *elasticsearch.Client, name, ttl, refresh string, roles []byte, meta interface{}) (*ApiKey, error) { payload := struct { Name string `json:"name,omitempty"` Expiration string `json:"expiration,omitempty"` Roles json.RawMessage `json:"role_descriptors,omitempty"` + Metadata interface{} `json:"metadata"` }{ - name, - ttl, - roles, + Name: name, + Expiration: ttl, + Roles: roles, + Metadata: meta, } body, err := json.Marshal(&payload) @@ -33,7 +34,7 @@ func Create(ctx context.Context, client *elasticsearch.Client, name, ttl 
string, opts := []func(*esapi.SecurityCreateAPIKeyRequest){ client.Security.CreateAPIKey.WithContext(ctx), - client.Security.CreateAPIKey.WithRefresh("true"), + client.Security.CreateAPIKey.WithRefresh(refresh), } res, err := client.Security.CreateAPIKey( @@ -48,7 +49,7 @@ func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, defer res.Body.Close() if res.IsError() { - return nil, fmt.Errorf("Fail CreateAPIKey: %s", res.String()) + return nil, fmt.Errorf("fail CreateAPIKey: %s", res.String()) } type APIKeyResponse struct { diff --git a/internal/pkg/apikey/get.go b/internal/pkg/apikey/get.go new file mode 100644 index 000000000..50ba3a64d --- /dev/null +++ b/internal/pkg/apikey/get.go @@ -0,0 +1,69 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package apikey + +import ( + "context" + "encoding/json" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/pkg/errors" +) + +type ApiKeyMetadata struct { + Id string + Metadata Metadata +} + +func Read(ctx context.Context, client *elasticsearch.Client, id string) (apiKey *ApiKeyMetadata, err error) { + + opts := []func(*esapi.SecurityGetAPIKeyRequest){ + client.Security.GetAPIKey.WithContext(ctx), + client.Security.GetAPIKey.WithID(id), + } + + res, err := client.Security.GetAPIKey( + opts..., + ) + + if err != nil { + return + } + + defer res.Body.Close() + + if res.IsError() { + err = errors.Wrap(ErrApiKeyNotFound, res.String()) + return + } + + type APIKeyResponse struct { + Id string `json:"id"` + Metadata Metadata `json:"metadata"` + } + type GetAPIKeyResponse struct { + ApiKeys []APIKeyResponse `json:"api_keys"` + } + + var resp GetAPIKeyResponse + d := json.NewDecoder(res.Body) + if err = d.Decode(&resp); err != nil { + return + } + 
+ if len(resp.ApiKeys) == 0 { + return apiKey, ErrApiKeyNotFound + } + + first := resp.ApiKeys[0] + + apiKey = &ApiKeyMetadata{ + Id: first.Id, + Metadata: first.Metadata, + } + + return +} diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go new file mode 100644 index 000000000..8d284df03 --- /dev/null +++ b/internal/pkg/apikey/invalidate.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package apikey + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" +) + +// Invalidate invalidates the provided API keys by ID. +func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string) error { + + payload := struct { + IDs []string `json:"ids,omitempty"` + Owner bool `json:"owner"` + }{ + ids, + true, + } + + body, err := json.Marshal(&payload) + if err != nil { + return fmt.Errorf("InvalidateAPIKey: %w", err) + } + + opts := []func(*esapi.SecurityInvalidateAPIKeyRequest){ + client.Security.InvalidateAPIKey.WithContext(ctx), + } + + res, err := client.Security.InvalidateAPIKey( + bytes.NewReader(body), + opts..., + ) + + if err != nil { + return fmt.Errorf("InvalidateAPIKey: %w", err) + } + + defer res.Body.Close() + + if res.IsError() { + return fmt.Errorf("fail InvalidateAPIKey: %s", res.String()) + } + + return nil +} diff --git a/internal/pkg/apikey/metadata.go b/internal/pkg/apikey/metadata.go new file mode 100644 index 000000000..5e347ecb8 --- /dev/null +++ b/internal/pkg/apikey/metadata.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package apikey + +const ManagedByFleetServer = "fleet-server" + +type Type int + +const ( + TypeAccess Type = iota + TypeOutput +) + +func (t Type) String() string { + return []string{"access", "output"}[t] +} + +type Metadata struct { + AgentId string `json:"agent_id,omitempty"` + Managed bool `json:"managed,omitempty"` + ManagedBy string `json:"managed_by,omitempty"` + Type string `json:"type,omitempty"` +} + +func NewMetadata(agentId string, typ Type) Metadata { + return Metadata{ + AgentId: agentId, + Managed: true, + ManagedBy: ManagedByFleetServer, + Type: typ.String(), + } +} diff --git a/internal/pkg/build/build.go b/internal/pkg/build/build.go new file mode 100644 index 000000000..06c168dc0 --- /dev/null +++ b/internal/pkg/build/build.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package build + +import "time" + +type Info struct { + Version, Commit string + BuildTime time.Time +} + +func Time(stime string) time.Time { + t, err := time.Parse(time.RFC3339, stime) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/pkg/bulk/block.go b/internal/pkg/bulk/block.go new file mode 100644 index 000000000..671703690 --- /dev/null +++ b/internal/pkg/bulk/block.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/danger" +) + +type Buf = danger.Buf + +// bulkT is generally allocated in the bulk engines's 'blkPool' +// However, the multiOp API's will allocate directly in large blocks. + +type bulkT struct { + action actionT // requested actions + flags flagsT // execution flags + idx int32 // idx of originating request, used in mulitOp + ch chan respT // response channel, caller is waiting synchronously + buf Buf // json payload to be sent to elastic + next *bulkT // pointer to next bulkT, used for fast internal queueing +} + +type flagsT int8 + +const ( + flagRefresh flagsT = 1 << iota +) + +func (ft flagsT) Has(f flagsT) bool { + return ft&f != 0 +} + +func (ft *flagsT) Set(f flagsT) { + *ft = *ft | f +} + +type actionT int8 + +const ( + ActionCreate actionT = iota + ActionDelete + ActionIndex + ActionUpdate + ActionRead + ActionSearch +) + +var actionStrings = []string{ + "create", + "delete", + "index", + "update", + "read", + "search", +} + +func (a actionT) String() string { + return actionStrings[a] +} + +func (blk *bulkT) reset() { + blk.action = 0 + blk.flags = 0 + blk.idx = 0 + blk.buf.Reset() + blk.next = nil +} + +func newBlk() interface{} { + return &bulkT{ + ch: make(chan respT, 1), + } +} + +type respT struct { + err error + idx int32 + data interface{} +} diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go deleted file mode 100644 index 7761a7dca..000000000 --- a/internal/pkg/bulk/bulk.go +++ /dev/null @@ -1,798 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package bulk - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" - "github.com/rs/zerolog/log" - "golang.org/x/sync/semaphore" -) - -type BulkOp struct { - Id string - Index string - Body []byte -} - -type Bulk interface { - Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) - Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) - Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error - Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) - // Delete (ctx context.Context, index, id string, opts ...Opt) error - - MUpdate(ctx context.Context, ops []BulkOp, opts ...Opt) error - - Search(ctx context.Context, index []string, body []byte, opts ...Opt) (*es.ResultT, error) - - Client() *elasticsearch.Client -} - -type Action string - -func (a Action) Str() string { return string(a) } - -const ( - ActionCreate Action = "create" - ActionDelete = "delete" - ActionIndex = "index" - ActionUpdate = "update" - ActionRead = "read" - ActionSearch = "search" -) - -const kModBulk = "bulk" - -type respT struct { - idx int - err error - data interface{} -} - -type bulkT struct { - idx int - action Action - ch chan respT - data []byte - opts optionsT -} - -type Bulker struct { - es *elasticsearch.Client - ch chan bulkT -} - -const ( - rPrefix = "{\"docs\": [" - rSuffix = "]}" - - defaultFlushInterval = time.Second * 5 - defaultFlushThresholdCnt = 32768 - defaultFlushThresholdSz = 1024 * 1024 * 10 - defaultMaxPending = 32 -) - -func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) { - - es, err := es.NewClient(ctx, cfg) - if err != nil { - return nil, nil, err - } - - opts = 
append(opts, WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval)) - - blk := NewBulker(es) - go func() { - err := blk.Run(ctx, opts...) - log.Info().Err(err).Msg("Bulker exit") - }() - - return es, blk, nil -} - -func NewBulker(es *elasticsearch.Client) *Bulker { - return &Bulker{ - es: es, - ch: make(chan bulkT), - } -} - -func (b *Bulker) Client() *elasticsearch.Client { - return b.es -} - -func (b *Bulker) parseBulkOpts(opts ...BulkOpt) bulkOptT { - bopt := bulkOptT{ - flushInterval: defaultFlushInterval, - flushThresholdCnt: defaultFlushThresholdCnt, - flushThresholdSz: defaultFlushThresholdSz, - maxPending: defaultMaxPending, - } - - for _, f := range opts { - f(&bopt) - } - - return bopt -} - -// Stop timer, but don't stall on channel. -// API doesn't not seem to work as specified. -func stopTimer(t *time.Timer) { - if !t.Stop() { - select { - case <-t.C: - default: - } - } -} - -type queueT struct { - action Action - queue []bulkT - pending int -} - -const ( - kQueueBulk = iota - kQueueRead - kQueueSearch - kQueueRefresh - kNumQueues -) - -func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { - var err error - - bopts := b.parseBulkOpts(opts...) 
- - // Create timer in stopped state - timer := time.NewTimer(bopts.flushInterval) - stopTimer(timer) - defer timer.Stop() - - w := semaphore.NewWeighted(int64(bopts.maxPending)) - - queues := make([]*queueT, 0, kNumQueues) - for i := 0; i < kNumQueues; i++ { - var action Action - switch i { - case kQueueRead: - action = ActionRead - case kQueueSearch: - action = ActionSearch - case kQueueBulk, kQueueRefresh: - // Empty action is correct - default: - // Bad programmer - panic("Unknown bulk queue") - } - - queues = append(queues, &queueT{ - action: action, - queue: make([]bulkT, 0, bopts.flushThresholdCnt), - }) - } - - var itemCnt int - var byteCnt int - - doFlush := func() error { - - for _, q := range queues { - if q.pending > 0 { - if err := b.flushQueue(ctx, w, q.queue, q.pending, q.action); err != nil { - return err - } - - q.pending = 0 - q.queue = make([]bulkT, 0, bopts.flushThresholdCnt) - } - } - - // Reset threshold counters - itemCnt = 0 - byteCnt = 0 - - stopTimer(timer) - return nil - } - -LOOP: - for err == nil { - - select { - - case item := <-b.ch: - - queueIdx := kQueueBulk - - switch item.action { - case ActionRead: - queueIdx = kQueueRead - case ActionSearch: - queueIdx = kQueueSearch - default: - if item.opts.Refresh { - queueIdx = kQueueRefresh - } - } - - q := queues[queueIdx] - q.queue = append(q.queue, item) - q.pending += len(item.data) - - // Update threshold counters - itemCnt += 1 - byteCnt += len(item.data) - - // Start timer on first queued item - if itemCnt == 1 { - timer.Reset(bopts.flushInterval) - } - - // Threshold test, short circuit timer on pending count - if itemCnt >= bopts.flushThresholdCnt || byteCnt >= bopts.flushThresholdSz { - log.Trace(). - Str("mod", kModBulk). - Int("itemCnt", itemCnt). - Int("byteCnt", byteCnt). - Msg("Flush on threshold") - - err = doFlush() - } - - case <-timer.C: - log.Trace(). - Str("mod", kModBulk). - Int("itemCnt", itemCnt). - Int("byteCnt", byteCnt). 
- Msg("Flush on timer") - err = doFlush() - - case <-ctx.Done(): - err = ctx.Err() - break LOOP - - } - - } - - return err -} - -func (b *Bulker) flushQueue(ctx context.Context, w *semaphore.Weighted, queue []bulkT, szPending int, action Action) error { - start := time.Now() - log.Trace(). - Str("mod", kModBulk). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Msg("flushQueue Wait") - - if err := w.Acquire(ctx, 1); err != nil { - return err - } - - log.Trace(). - Str("mod", kModBulk). - Dur("tdiff", time.Since(start)). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Msg("flushQueue Acquired") - - go func() { - start := time.Now() - - defer w.Release(1) - - var err error - switch action { - case ActionRead: - err = b.flushRead(ctx, queue, szPending) - case ActionSearch: - err = b.flushSearch(ctx, queue, szPending) - default: - err = b.flushBulk(ctx, queue, szPending) - } - - if err != nil { - failQueue(queue, err) - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Dur("rtt", time.Since(start)). - Msg("flushQueue Done") - - }() - - return nil -} - -func (b *Bulker) flushRead(ctx context.Context, queue []bulkT, szPending int) error { - start := time.Now() - - buf := bytes.NewBufferString(rPrefix) - buf.Grow(szPending + len(rSuffix)) - - // Each item a JSON array element followed by comma - for _, item := range queue { - buf.Write(item.data) - } - - // Need to strip the last element and append the suffix - payload := buf.Bytes() - payload = append(payload[:len(payload)-1], []byte(rSuffix)...) 
- - // Do actual bulk request; and send response on chan - req := esapi.MgetRequest{ - Body: bytes.NewReader(payload), - } - res, err := req.Do(ctx, b.es) - - if err != nil { - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk MgetResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Dur("rtt", time.Since(start)). - Int("sz", len(blk.Items)). - Msg("flushRead") - - if len(blk.Items) != len(queue) { - return fmt.Errorf("Mget queue length mismatch") - } - - for i, item := range blk.Items { - citem := item - queue[i].ch <- respT{ - idx: queue[i].idx, - err: item.deriveError(), - data: &citem, - } - - } - - return nil -} - -func (b *Bulker) flushSearch(ctx context.Context, queue []bulkT, szPending int) error { - start := time.Now() - - buf := bytes.Buffer{} - buf.Grow(szPending) - - for _, item := range queue { - buf.Write(item.data) - } - - // Do actual bulk request; and send response on chan - req := esapi.MsearchRequest{ - Body: bytes.NewReader(buf.Bytes()), - } - res, err := req.Do(ctx, b.es) - - if err != nil { - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk MsearchResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Dur("rtt", time.Since(start)). - Int("took", blk.Took). - Int("sz", len(blk.Responses)). 
- Msg("flushSearch") - - if len(blk.Responses) != len(queue) { - return fmt.Errorf("Bulk queue length mismatch") - } - - for i, response := range blk.Responses { - - cResponse := response - queue[i].ch <- respT{ - idx: queue[i].idx, - err: response.deriveError(), - data: &cResponse, - } - } - - return nil -} - -func (b *Bulker) flushBulk(ctx context.Context, queue []bulkT, szPending int) error { - - buf := bytes.Buffer{} - buf.Grow(szPending) - - doRefresh := "false" - for _, item := range queue { - buf.Write(item.data) - if item.opts.Refresh { - doRefresh = "true" - } - } - - // Do actual bulk request; and send response on chan - req := esapi.BulkRequest{ - Body: bytes.NewReader(buf.Bytes()), - Refresh: doRefresh, - } - res, err := req.Do(ctx, b.es) - - if err != nil { - log.Error().Err(err).Str("mod", kModBulk).Msg("Fail req.Do") - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail result") - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk BulkIndexerResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - log.Error().Err(err).Str("mod", kModBulk).Msg("Decode error") - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Bool("refresh", doRefresh == "true"). - Str("mod", kModBulk). - Int("took", blk.Took). - Bool("hasErrors", blk.HasErrors). - Int("sz", len(blk.Items)). 
- Msg("flushBulk") - - if len(blk.Items) != len(queue) { - return fmt.Errorf("Bulk queue length mismatch") - } - - for i, blkItem := range blk.Items { - - for _, item := range blkItem { - - select { - case queue[i].ch <- respT{ - idx: queue[i].idx, - err: item.deriveError(), - data: &item, - }: - default: - panic("Should not happen") - } - - break - } - } - - return nil -} - -func failQueue(queue []bulkT, err error) { - for _, i := range queue { - i.ch <- respT{ - idx: i.idx, - err: err, - } - } -} - -func (b *Bulker) parseOpts(opts ...Opt) optionsT { - var opt optionsT - for _, o := range opts { - o(&opt) - } - return opt -} - -func (b *Bulker) Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { - item, err := b.waitBulkAction(ctx, ActionCreate, index, id, body, opts...) - if err != nil { - return "", err - } - - return item.DocumentID, nil -} - -func (b *Bulker) Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { - item, err := b.waitBulkAction(ctx, ActionIndex, index, id, body, opts...) - if err != nil { - return "", err - } - return item.DocumentID, nil -} - -func (b *Bulker) Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error { - _, err := b.waitBulkAction(ctx, ActionUpdate, index, id, body, opts...) - return err -} - -func (b *Bulker) waitBulkAction(ctx context.Context, action Action, index, id string, body []byte, opts ...Opt) (*BulkIndexerResponseItem, error) { - opt := b.parseOpts(opts...) 
- - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(len(body) + kSlop) - - if err := b.writeBulkMeta(&buf, action, index, id); err != nil { - return nil, err - } - - if err := b.writeBulkBody(&buf, body); err != nil { - return nil, err - } - - // Dispatch and wait for response - resp := b.dispatch(ctx, action, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - r := resp.data.(*BulkIndexerResponseItem) - return r, nil -} - -func (b *Bulker) Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) { - opt := b.parseOpts(opts...) - - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(kSlop) - - if err := b.writeMget(&buf, index, id); err != nil { - return nil, err - } - - // Process response - resp := b.dispatch(ctx, ActionRead, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - // Interpret response, looking for generated id - r := resp.data.(*MgetResponseItem) - return r.Source, nil -} - -func (b *Bulker) Search(ctx context.Context, index []string, body []byte, opts ...Opt) (*es.ResultT, error) { - opt := b.parseOpts(opts...) 
- - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(len(body) + kSlop) - - if err := b.writeMsearchMeta(&buf, index); err != nil { - return nil, err - } - - if err := b.writeMsearchBody(&buf, body); err != nil { - return nil, err - } - - // Process response - resp := b.dispatch(ctx, ActionSearch, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - // Interpret response - r := resp.data.(*MsearchResponseItem) - return &es.ResultT{HitsT: r.Hits, Aggregations: r.Aggregations}, nil -} - -func (b *Bulker) writeMsearchMeta(buf *bytes.Buffer, indices []string) error { - if err := b.validateIndices(indices); err != nil { - return err - } - - switch len(indices) { - case 0: - buf.WriteString("{ }\n") - case 1: - buf.WriteString(`{"index": "`) - buf.WriteString(indices[0]) - buf.WriteString("\"}\n") - default: - buf.WriteString(`{"index": `) - if d, err := json.Marshal(indices); err != nil { - return err - } else { - buf.Write(d) - } - buf.WriteString("}\n") - } - - return nil -} - -func (b *Bulker) writeMsearchBody(buf *bytes.Buffer, body []byte) error { - buf.Write(body) - buf.WriteRune('\n') - - return b.validateBody(body) -} - -func (b *Bulker) validateIndex(index string) error { - // TODO: index - return nil -} - -func (b *Bulker) validateIndices(indices []string) error { - for _, i := range indices { - if err := b.validateIndex(i); err != nil { - return err - } - } - return nil -} - -func (b *Bulker) validateMeta(index, id string) error { - // TODO: validate id and index; not quotes anyhow - return nil -} - -// TODO: Fail on non-escaped line feeds -func (b *Bulker) validateBody(body []byte) error { - if !json.Valid(body) { - return es.ErrInvalidBody - } - - return nil -} - -func (b *Bulker) writeMget(buf *bytes.Buffer, index, id string) error { - if err := b.validateMeta(index, id); err != nil { - return err - } - - buf.WriteString(`{"_index":"`) - buf.WriteString(index) - buf.WriteString(`","_id":"`) - 
buf.WriteString(id) - buf.WriteString(`"},`) - return nil -} - -func (b *Bulker) writeBulkMeta(buf *bytes.Buffer, action Action, index, id string) error { - if err := b.validateMeta(index, id); err != nil { - return err - } - - buf.WriteString(`{"`) - buf.WriteString(action.Str()) - buf.WriteString(`":{`) - if id != "" { - buf.WriteString(`"_id":"`) - buf.WriteString(id) - buf.WriteString(`",`) - } - - buf.WriteString(`"_index":"`) - buf.WriteString(index) - buf.WriteString("\"}}\n") - return nil -} - -func (b *Bulker) writeBulkBody(buf *bytes.Buffer, body []byte) error { - if body == nil { - return nil - } - - buf.Write(body) - buf.WriteRune('\n') - - return b.validateBody(body) -} - -func (b *Bulker) dispatch(ctx context.Context, action Action, opts optionsT, data []byte) respT { - start := time.Now() - - ch := make(chan respT, 1) - - item := bulkT{ - 0, - action, - ch, - data, - opts, - } - - // Dispatch to bulk Run loop - select { - case b.ch <- item: - case <-ctx.Done(): - log.Error(). - Err(ctx.Err()). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch abort queue") - return respT{err: ctx.Err()} - } - - // Wait for response - select { - case resp := <-ch: - log.Trace(). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch OK") - - return resp - case <-ctx.Done(): - log.Error(). - Err(ctx.Err()). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch abort response") - } - - return respT{err: ctx.Err()} -} diff --git a/internal/pkg/bulk/bulk_integration_test.go b/internal/pkg/bulk/bulk_integration_test.go new file mode 100644 index 000000000..353da82f7 --- /dev/null +++ b/internal/pkg/bulk/bulk_integration_test.go @@ -0,0 +1,439 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build integration +// +build integration + +package bulk + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "sync" + "testing" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + + "github.com/google/go-cmp/cmp" + "github.com/rs/zerolog/log" +) + +func TestBulkCreate(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + tests := []struct { + Name string + Index string + Id string + Err error + }{ + { + Name: "Empty Id", + Index: index, + }, + { + Name: "Simple Id", + Index: index, + Id: "elastic", + }, + { + Name: "Single quoted Id", + Index: index, + Id: `'singlequotes'`, + }, + { + Name: "Double quoted Id", + Index: index, + Id: `"doublequotes"`, + Err: ErrNoQuotes, + }, + { + Name: "Empty Index", + Index: "", + Err: es.ErrElastic{ + Status: 500, + Type: "string_index_out_of_bounds_exception", + }, + }, + { + Name: "Unicode Index 豆腐", + Index: string([]byte{0xe8, 0xb1, 0x86, 0xe8, 0x85, 0x90}), + }, + { + Name: "Invalid utf-8", + Index: string([]byte{0xfe, 0xfe, 0xff, 0xff}), + Err: es.ErrElastic{ + Status: 400, + Type: "json_parse_exception", + }, + }, + { + Name: "Malformed Index Uppercase", + Index: "UPPERCASE", + Err: es.ErrElastic{ + Status: 400, + Type: "invalid_index_name_exception", + }, + }, + { + Name: "Malformed Index underscore", + Index: "_nope", + Err: es.ErrElastic{ + Status: 400, + Type: "invalid_index_name_exception", + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + + sample := NewRandomSample() + sampleData := sample.marshal(t) + + // Create + id, err := bulker.Create(ctx, test.Index, test.Id, sampleData) + if !EqualElastic(test.Err, err) { + t.Fatal(err) + } + if err != nil { + return + 
} + + if test.Id != "" && id != test.Id { + t.Error("Expected specified id") + } else if id == "" { + t.Error("Expected non-empty id") + } + + // Read + var dst testT + dst.read(t, bulker, ctx, test.Index, id) + diff := cmp.Diff(sample, dst) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestBulkCreateBody(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + tests := []struct { + Name string + Body []byte + Err error + }{ + { + "Empty Body", + nil, + nil, + }, + { + "Malformed Body", + []byte("{nope}"), + es.ErrInvalidBody, + }, + { + "Overflow", + []byte(`{"overflow": 99999999999999999999}`), + es.ErrElastic{ + Status: 400, + Type: "mapper_parsing_exception", + }, + }, + { + "Invalid utf-8", + []byte{0x7b, 0x22, 0x6f, 0x6b, 0x22, 0x3a, 0x22, 0xfe, 0xfe, 0xff, 0xff, 0x22, 0x7d}, // {"ok":"${BADUTF8}"} + es.ErrElastic{ + Status: 400, + Type: "mapper_parsing_exception", + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + + _, err := bulker.Create(ctx, index, "", test.Body) + if !EqualElastic(test.Err, err) { + t.Fatal(err) + } + if err != nil { + return + } + }) + } +} + +func TestBulkIndex(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + sample := NewRandomSample() + + // Index + id, err := bulker.Index(ctx, index, "", sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Read + var dst testT + dst.read(t, bulker, ctx, index, id) + diff := cmp.Diff(sample, dst) + if diff != "" { + t.Fatal(diff) + } +} + +func TestBulkUpdate(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + id, err := bulker.Create(ctx, index, "", 
sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Update + nVal := "funkycoldmedina" + fields := UpdateFields{"kwval": nVal} + data, err := fields.Marshal() + if err != nil { + t.Fatal(err) + } + + err = bulker.Update(ctx, index, id, data, WithRefresh()) + if err != nil { + t.Fatal(err) + } + + // Read again, validate update + var dst2 testT + dst2.read(t, bulker, ctx, index, id) + + sample.KWVal = nVal + diff := cmp.Diff(sample, dst2) + if diff != "" { + t.Fatal(diff) + } +} + +func TestBulkSearch(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + _, err := bulker.Create(ctx, index, "", sample.marshal(t), WithRefresh()) + if err != nil { + t.Fatal(err) + } + + // Search + dsl := fmt.Sprintf(`{"query": { "term": {"kwval": "%s"}}}`, sample.KWVal) + + res, err := bulker.Search(ctx, index, []byte(dsl)) + + if err != nil { + t.Fatal(err) + } + + if res == nil { + t.Fatal(nil) + } + + if len(res.Hits) != 1 { + t.Fatal(fmt.Sprintf("hit mismatch: %d", len(res.Hits))) + } + + var dst3 testT + if err = json.Unmarshal(res.Hits[0].Source, &dst3); err != nil { + t.Fatal(err) + } + + diff := cmp.Diff(sample, dst3) + if diff != "" { + t.Fatal(diff) + } +} + +func TestBulkDelete(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + id, err := bulker.Create(ctx, index, "", sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Delete + err = bulker.Delete(ctx, index, id) + if err != nil { + t.Fatal(err) + } + + data, err := bulker.Read(ctx, index, id) + if err != es.ErrElasticNotFound || data != nil { + t.Fatal(err) + } + + // Attempt to delete again, should not be found + err = bulker.Delete(ctx, index, id) + if e, ok := err.(*es.ErrElastic); !ok || e.Status != 404 { + t.Fatal(err) + } 
+} + +// This runs a series of CRUD operations through elastic. +// Not a particularly useful benchmark, but gives some idea of memory overhead. + +func benchmarkCreate(n int, b *testing.B) { + b.ReportAllocs() + defer (QuietLogger())() + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n)) + + var wait sync.WaitGroup + wait.Add(n) + for i := 0; i < n; i++ { + + go func() { + defer wait.Done() + + sample := NewRandomSample() + sampleData := sample.marshal(b) + + for j := 0; j < b.N; j++ { + + // Create + _, err := bulker.Create(ctx, index, "", sampleData) + if err != nil { + b.Fatal(err) + } + } + }() + } + + wait.Wait() +} + +func BenchmarkCreate(b *testing.B) { + + benchmarks := []int{1, 64, 8192, 16384, 32768, 65536} + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkCreate(n, b) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} + +// This runs a series of CRUD operations through elastic. +// Not a particularly useful benchmark, but gives some idea of memory overhead. 
+ +func benchmarkCRUD(n int, b *testing.B) { + b.ReportAllocs() + defer (QuietLogger())() + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n)) + + fieldUpdate := UpdateFields{"kwval": "funkycoldmedina"} + fieldData, err := fieldUpdate.Marshal() + if err != nil { + b.Fatal(err) + } + + var wait sync.WaitGroup + wait.Add(n) + for i := 0; i < n; i++ { + + go func() { + defer wait.Done() + + sample := NewRandomSample() + sampleData := sample.marshal(b) + + for j := 0; j < b.N; j++ { + + // Create + id, err := bulker.Create(ctx, index, "", sampleData) + if err != nil { + b.Fatal(err) + } + + // Read + _, err = bulker.Read(ctx, index, id) + if err != nil { + b.Fatal(err) + } + + // Update + err = bulker.Update(ctx, index, id, fieldData) + if err != nil { + b.Fatal(err) + } + + // Delete + err = bulker.Delete(ctx, index, id) + if err != nil { + log.Info().Str("index", index).Str("id", id).Msg("dlete fail") + b.Fatal(err) + } + } + }() + } + + wait.Wait() +} + +func BenchmarkCRUD(b *testing.B) { + + benchmarks := []int{1, 64, 8192, 16384, 32768, 65536} + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkCRUD(n, b) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} diff --git a/internal/pkg/bulk/bulk_test.go b/internal/pkg/bulk/bulk_test.go new file mode 100644 index 000000000..e2977605b --- /dev/null +++ b/internal/pkg/bulk/bulk_test.go @@ -0,0 +1,381 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io/ioutil" + "net/http" + "strconv" + "sync" + "testing" + "time" + + "github.com/rs/zerolog/log" +) + +// TODO: +// WithREfresh() options +// Delete not found? + +type stubTransport struct { + cb func(*http.Request) (*http.Response, error) +} + +func (s *stubTransport) Perform(req *http.Request) (*http.Response, error) { + return s.cb(req) +} + +type mockBulkTransport struct { + b *testing.B +} + +func (m *mockBulkTransport) Perform(req *http.Request) (*http.Response, error) { + + type mockFrameT struct { + Index json.RawMessage `json:"index,omitempty"` + Delete json.RawMessage `json:"delete,omitempty"` + Create json.RawMessage `json:"create,omitempty"` + Update json.RawMessage `json:"update,omitempty"` + } + + type mockEmptyT struct { + } + + mockResponse := []byte(`{"index":{"_index":"test","_type":"_doc","_id":"1","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"status":201,"_seq_no":0,"_primary_term":1}},`) + + var body bytes.Buffer + + // Write framing + body.WriteString(`{"items": [`) + + cnt := 0 + + skip := false + decoder := json.NewDecoder(req.Body) + for decoder.More() { + if skip { + skip = false + var e mockEmptyT + if err := decoder.Decode(&e); err != nil { + return nil, err + } + } else { + var frame mockFrameT + if err := decoder.Decode(&frame); err != nil { + return nil, err + } + + // Which op + switch { + case frame.Index != nil: + skip = true + case frame.Delete != nil: + case frame.Create != nil: + skip = true + case frame.Update != nil: + skip = true + default: + return nil, errors.New("Unknown op") + } + + // write mocked response + _, err := body.Write(mockResponse) + + if err != nil { + return nil, err + } + + cnt += 1 + } + } + + if cnt > 0 { + body.Truncate(body.Len() - 1) + } + + // Write trailer + body.WriteString(`], "took": 1, "errors": false}`) + + resp := &http.Response{ + Request: req, + StatusCode: 200, + Status: 
"200 OK", + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(&body), + } + + return resp, nil +} + +// API should exit quickly if cancelled. +// Note: In the real world, the transaction may already be in flight, +// cancelling a call does not mean the transaction did not occur. +func TestCancelCtx(t *testing.T) { + + // create a bulker, but don't bother running it + bulker := NewBulker(nil) + + tests := []struct { + name string + test func(t *testing.T, ctx context.Context) + }{ + { + "create", + func(t *testing.T, ctx context.Context) { + id, err := bulker.Create(ctx, "testidx", "", []byte(`{"hey":"now"}`)) + + if id != "" { + t.Error("Expected empty id on context cancel:", id) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "read", + func(t *testing.T, ctx context.Context) { + data, err := bulker.Read(ctx, "testidx", "11") + + if data != nil { + t.Error("Expected empty data on context cancel:", data) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "update", + func(t *testing.T, ctx context.Context) { + err := bulker.Update(ctx, "testidx", "11", []byte(`{"now":"hey"}`)) + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "delete", + func(t *testing.T, ctx context.Context) { + err := bulker.Delete(ctx, "testidx", "11") + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "index", + func(t *testing.T, ctx context.Context) { + id, err := bulker.Index(ctx, "testidx", "", []byte(`{"hey":"now"}`)) + + if id != "" { + t.Error("Expected empty id on context cancel:", id) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "search", + func(t *testing.T, ctx context.Context) { + res, err := bulker.Search(ctx, "testidx", []byte(`{"hey":"now"}`)) + + if res != nil { + t.Error("Expected 
empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mcreate", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MCreate(ctx, []MultiOp{{Index: "testidx", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mindex", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MIndex(ctx, []MultiOp{{Index: "testidx", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mupdate", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MUpdate(ctx, []MultiOp{{Index: "testidx", Id: "umm", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mdelete", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MDelete(ctx, []MultiOp{{Index: "testidx", Id: "myid"}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + ctx, cancelF := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + test.test(t, ctx) + }() + + time.Sleep(time.Millisecond) + cancelF() + + wg.Wait() + }) + } +} + +func benchmarkMockBulk(b *testing.B, samples [][]byte) { + b.ReportAllocs() + defer (QuietLogger())() + + mock := &mockBulkTransport{} + + ctx, cancelF := context.WithCancel(context.Background()) + defer cancelF() + + n 
:= len(samples) + bulker := NewBulker(mock, WithFlushThresholdCount(n)) + + var waitBulker sync.WaitGroup + waitBulker.Add(1) + go func() { + defer waitBulker.Done() + if err := bulker.Run(ctx); err != context.Canceled { + b.Error(err) + } + }() + + fieldUpdate := UpdateFields{"kwval": "funkycoldmedina"} + fieldData, err := fieldUpdate.Marshal() + if err != nil { + b.Fatal(err) + } + + index := "fakeIndex" + + var wait sync.WaitGroup + wait.Add(n) + for i := 0; i < n; i++ { + + go func(sampleData []byte) { + defer wait.Done() + + for j := 0; j < b.N; j++ { + // Create + id, err := bulker.Create(ctx, index, "", sampleData) + if err != nil { + b.Error(err) + } + // Index + _, err = bulker.Index(ctx, index, id, sampleData) + if err != nil { + b.Error(err) + } + + // Update + err = bulker.Update(ctx, index, id, fieldData) + if err != nil { + b.Error(err) + } + + // Delete + err = bulker.Delete(ctx, index, id) + if err != nil { + log.Info().Str("index", index).Str("id", id).Msg("delete fail") + b.Error(err) + } + } + }(samples[i]) + } + + wait.Wait() + cancelF() + waitBulker.Wait() +} + +func BenchmarkMockBulk(b *testing.B) { + + benchmarks := []int{1, 8, 64, 4096, 32768} + + // Create the samples outside the loop to avoid accounting + max := 0 + for _, v := range benchmarks { + if max < v { + max = v + } + } + + samples := make([][]byte, 0, max) + for i := 0; i < max; i++ { + s := NewRandomSample() + samples = append(samples, s.marshal(b)) + } + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkMockBulk(b, samples[:n]) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} diff --git a/internal/pkg/bulk/engine.go b/internal/pkg/bulk/engine.go new file mode 100644 index 000000000..c574ab460 --- /dev/null +++ b/internal/pkg/bulk/engine.go @@ -0,0 +1,403 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + "encoding/json" + "errors" + "strings" + "sync" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/rs/zerolog/log" + "golang.org/x/sync/semaphore" +) + +type ApiKey = apikey.ApiKey +type SecurityInfo = apikey.SecurityInfo +type ApiKeyMetadata = apikey.ApiKeyMetadata + +var ( + ErrNoQuotes = errors.New("quoted literal not supported") +) + +type MultiOp struct { + Id string + Index string + Body []byte +} + +type Bulk interface { + + // Synchronous operations run in the bulk engine + Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) + Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) + Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error + Delete(ctx context.Context, index, id string, opts ...Opt) error + Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) + Search(ctx context.Context, index string, body []byte, opts ...Opt) (*es.ResultT, error) + + // Multi Operation API's run in the bulk engine + MCreate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) + MIndex(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) + MUpdate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) + MDelete(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) + + // APIKey operations + ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) + ApiKeyRead(ctx context.Context, id string) (*ApiKeyMetadata, error) + ApiKeyAuth(ctx context.Context, key ApiKey) 
(*SecurityInfo, error) + ApiKeyInvalidate(ctx context.Context, ids ...string) error + + // Accessor used to talk to elastic search direcly bypassing bulk engine + Client() *elasticsearch.Client +} + +const kModBulk = "bulk" + +type Bulker struct { + es esapi.Transport + ch chan *bulkT + opts bulkOptT + blkPool sync.Pool + apikeyLimit *semaphore.Weighted +} + +const ( + defaultFlushInterval = time.Second * 5 + defaultFlushThresholdCnt = 32768 + defaultFlushThresholdSz = 1024 * 1024 * 10 + defaultMaxPending = 32 + defaultBlockQueueSz = 32 // Small capacity to allow multiOp to spin fast + defaultApiKeyMaxParallel = 32 +) + +func NewBulker(es esapi.Transport, opts ...BulkOpt) *Bulker { + + bopts := parseBulkOpts(opts...) + + poolFunc := func() interface{} { + return &bulkT{ch: make(chan respT, 1)} + } + + return &Bulker{ + opts: bopts, + es: es, + ch: make(chan *bulkT, bopts.blockQueueSz), + blkPool: sync.Pool{New: poolFunc}, + apikeyLimit: semaphore.NewWeighted(int64(bopts.apikeyMaxParallel)), + } +} + +func (b *Bulker) Client() *elasticsearch.Client { + client, ok := b.es.(*elasticsearch.Client) + if !ok { + panic("Client is not an elastic search pointer") + } + return client +} + +// Stop timer, but don't stall on channel. +// API doesn't not seem to work as specified. 
+func stopTimer(t *time.Timer) { + if !t.Stop() { + select { + case <-t.C: + default: + } + } +} + +func blkToQueueType(blk *bulkT) queueType { + queueIdx := kQueueBulk + + forceRefresh := blk.flags.Has(flagRefresh) + + switch blk.action { + case ActionSearch: + queueIdx = kQueueSearch + case ActionRead: + if forceRefresh { + queueIdx = kQueueRefreshRead + } else { + queueIdx = kQueueRead + } + default: + if forceRefresh { + queueIdx = kQueueRefreshBulk + } + } + + return queueIdx +} + +func (b *Bulker) Run(ctx context.Context) error { + var err error + + log.Info().Interface("opts", &b.opts).Msg("Run bulker with options") + + // Create timer in stopped state + timer := time.NewTimer(b.opts.flushInterval) + stopTimer(timer) + defer timer.Stop() + + w := semaphore.NewWeighted(int64(b.opts.maxPending)) + + var queues [kNumQueues]queueT + + var i queueType + for ; i < kNumQueues; i++ { + queues[i].ty = i + } + + var itemCnt int + var byteCnt int + + doFlush := func() error { + + for i := range queues { + q := &queues[i] + if q.pending > 0 { + + // Pass queue structure by value + if err := b.flushQueue(ctx, w, *q); err != nil { + return err + } + + // Reset local queue stored in array + q.cnt = 0 + q.head = nil + q.pending = 0 + } + } + + // Reset threshold counters + itemCnt = 0 + byteCnt = 0 + + return nil + } + + for err == nil { + + select { + + case blk := <-b.ch: + + queueIdx := blkToQueueType(blk) + q := &queues[queueIdx] + + // Prepend block to head of target queue + blk.next = q.head + q.head = blk + + // Update pending count on target queue + q.cnt += 1 + q.pending += blk.buf.Len() + + // Update threshold counters + itemCnt += 1 + byteCnt += blk.buf.Len() + + // Start timer on first queued item + if itemCnt == 1 { + timer.Reset(b.opts.flushInterval) + } + + // Threshold test, short circuit timer on pending count + if itemCnt >= b.opts.flushThresholdCnt || byteCnt >= b.opts.flushThresholdSz { + log.Trace(). + Str("mod", kModBulk). + Int("itemCnt", itemCnt). 
+ Int("byteCnt", byteCnt). + Msg("Flush on threshold") + + err = doFlush() + + stopTimer(timer) + } + + case <-timer.C: + log.Trace(). + Str("mod", kModBulk). + Int("itemCnt", itemCnt). + Int("byteCnt", byteCnt). + Msg("Flush on timer") + err = doFlush() + + case <-ctx.Done(): + err = ctx.Err() + } + + } + + return err +} + +func (b *Bulker) flushQueue(ctx context.Context, w *semaphore.Weighted, queue queueT) error { + start := time.Now() + log.Trace(). + Str("mod", kModBulk). + Int("cnt", queue.cnt). + Int("szPending", queue.pending). + Str("queue", queue.Type()). + Msg("flushQueue Wait") + + if err := w.Acquire(ctx, 1); err != nil { + return err + } + + log.Trace(). + Str("mod", kModBulk). + Int("cnt", queue.cnt). + Dur("tdiff", time.Since(start)). + Int("szPending", queue.pending). + Str("queue", queue.Type()). + Msg("flushQueue Acquired") + + go func() { + start := time.Now() + + defer w.Release(1) + + var err error + switch queue.ty { + case kQueueRead, kQueueRefreshRead: + err = b.flushRead(ctx, queue) + case kQueueSearch: + err = b.flushSearch(ctx, queue) + default: + err = b.flushBulk(ctx, queue) + } + + if err != nil { + failQueue(queue, err) + } + + log.Trace(). + Err(err). + Str("mod", kModBulk). + Int("cnt", queue.cnt). + Int("szPending", queue.pending). + Str("queue", queue.Type()). + Dur("rtt", time.Since(start)). 
+ Msg("flushQueue Done") + + }() + + return nil +} + +func failQueue(queue queueT, err error) { + for n := queue.head; n != nil; { + next := n.next // 'n' is invalid immediately on channel send + n.ch <- respT{ + err: err, + } + n = next + } +} + +func (b *Bulker) parseOpts(opts ...Opt) optionsT { + var opt optionsT + for _, o := range opts { + o(&opt) + } + return opt +} + +func (b *Bulker) newBlk(action actionT, opts optionsT) *bulkT { + blk := b.blkPool.Get().(*bulkT) + blk.action = action + if opts.Refresh { + blk.flags.Set(flagRefresh) + } + return blk +} + +func (b *Bulker) freeBlk(blk *bulkT) { + blk.reset() + b.blkPool.Put(blk) +} + +func (b *Bulker) validateIndex(index string) error { + // TODO: index + return nil +} + +func (b *Bulker) validateIndices(indices []string) error { + for _, i := range indices { + if err := b.validateIndex(i); err != nil { + return err + } + } + return nil +} + +func (b *Bulker) validateMeta(index, id string) error { + + // Quotes on id are legal, but weird. Disallow for now. + if strings.IndexByte(index, '"') != -1 || strings.IndexByte(id, '"') != -1 { + return ErrNoQuotes + } + return nil +} + +// TODO: Fail on non-escaped line feeds +func (b *Bulker) validateBody(body []byte) error { + if !json.Valid(body) { + return es.ErrInvalidBody + } + + return nil +} + +func (b *Bulker) dispatch(ctx context.Context, blk *bulkT) respT { + start := time.Now() + + // Dispatch to bulk Run loop + select { + case b.ch <- blk: + case <-ctx.Done(): + log.Error(). + Err(ctx.Err()). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). + Msg("Dispatch abort queue") + return respT{err: ctx.Err()} + } + + // Wait for response + select { + case resp := <-blk.ch: + log.Trace(). + Err(resp.err). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). 
+ Msg("Dispatch OK") + + return resp + case <-ctx.Done(): + log.Error(). + Err(ctx.Err()). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). + Msg("Dispatch abort response") + } + + return respT{err: ctx.Err()} +} diff --git a/internal/pkg/bulk/helpers.go b/internal/pkg/bulk/helpers.go new file mode 100644 index 000000000..d4ad7ef35 --- /dev/null +++ b/internal/pkg/bulk/helpers.go @@ -0,0 +1,51 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "encoding/json" + "io/ioutil" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/rs/zerolog/log" +) + +type UpdateFields map[string]interface{} + +func (u UpdateFields) Marshal() ([]byte, error) { + doc := struct { + Doc map[string]interface{} `json:"doc"` + }{ + u, + } + + return json.Marshal(doc) +} + +// Attempt to interpret the response as an elastic error, +// otherwise return generic elastic error. 
+func parseError(res *esapi.Response) error { + + var e struct { + Err *es.ErrorT `json:"error"` + } + + decoder := json.NewDecoder(res.Body) + + if err := decoder.Decode(&e); err != nil { + log.Error().Err(err).Msg("Cannot decode Elasticsearch error body") + bodyBytes, readErr := ioutil.ReadAll(res.Body) + if readErr != nil { + log.Debug().Err(readErr).Msg("Error reading error response body from Elasticsearch") + } else { + log.Debug().Err(err).Bytes("body", bodyBytes).Msg("Error content") + } + + return err + } + + return es.TranslateError(res.StatusCode, e.Err) +} diff --git a/internal/pkg/bulk/multi.go b/internal/pkg/bulk/multi.go deleted file mode 100644 index c484f1ba9..000000000 --- a/internal/pkg/bulk/multi.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package bulk - -import ( - "bytes" - "context" - - "github.com/rs/zerolog/log" -) - -func (b *Bulker) MUpdate(ctx context.Context, ops []BulkOp, opts ...Opt) error { - _, err := b.multiWaitBulkAction(ctx, ActionUpdate, ops) - return err -} - -func (b *Bulker) multiWaitBulkAction(ctx context.Context, action Action, ops []BulkOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { - opt := b.parseOpts(opts...) 
- - // Serialize requests - nops := make([]BulkOp, 0, len(ops)) - for _, op := range ops { - - // Prealloc buffer - const kSlop = 64 - var buf bytes.Buffer - buf.Grow(len(op.Body) + kSlop) - - if err := b.writeBulkMeta(&buf, action, op.Index, op.Id); err != nil { - return nil, err - } - - if err := b.writeBulkBody(&buf, op.Body); err != nil { - return nil, err - } - - nops = append(nops, BulkOp{ - Id: op.Id, - Index: op.Index, - Body: buf.Bytes(), - }) - } - - // Dispatch and wait for response - resps, err := b.multiDispatch(ctx, action, opt, nops) - if err != nil { - return nil, err - } - - items := make([]BulkIndexerResponseItem, len(resps)) - for i, r := range resps { - if r.err != nil { - // TODO: well this is not great; handle this better - log.Error().Err(r.err).Msg("Fail muliDispatch") - return nil, r.err - } - items[i] = *r.data.(*BulkIndexerResponseItem) - } - - return items, nil -} - -func (b *Bulker) multiDispatch(ctx context.Context, action Action, opts optionsT, ops []BulkOp) ([]respT, error) { - var err error - - ch := make(chan respT, len(ops)) - - for i, op := range ops { - item := bulkT{ - i, - action, - ch, - op.Body, - opts, - } - - // Dispatch to bulk Run loop - select { - case b.ch <- item: - case <-ctx.Done(): - return nil, ctx.Err() - } - } - - // Wait for response - responses := make([]respT, 0, len(ops)) - -LOOP: - for len(responses) < len(ops) { - select { - case resp := <-ch: - responses = append(responses, resp) - case <-ctx.Done(): - err = ctx.Err() - responses = nil - break LOOP - } - } - - return responses, err -} diff --git a/internal/pkg/bulk/opApiKey.go b/internal/pkg/bulk/opApiKey.go new file mode 100644 index 000000000..190cbe8bb --- /dev/null +++ b/internal/pkg/bulk/opApiKey.go @@ -0,0 +1,51 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +import ( + "context" + + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" +) + +// The ApiKey API's are not yet bulk enabled. Stub the calls in the bulker +// and limit parallel access to prevent many requests from overloading +// the connection pool in the elastic search client. + +func (b *Bulker) ApiKeyAuth(ctx context.Context, key ApiKey) (*SecurityInfo, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return key.Authenticate(ctx, b.Client()) +} + +func (b *Bulker) ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return apikey.Create(ctx, b.Client(), name, ttl, "false", roles, meta) +} + +func (b *Bulker) ApiKeyRead(ctx context.Context, id string) (*ApiKeyMetadata, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return apikey.Read(ctx, b.Client(), id) +} + +func (b *Bulker) ApiKeyInvalidate(ctx context.Context, ids ...string) error { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return err + } + defer b.apikeyLimit.Release(1) + + return apikey.Invalidate(ctx, b.Client(), ids...) +} diff --git a/internal/pkg/bulk/opBulk.go b/internal/pkg/bulk/opBulk.go new file mode 100644 index 000000000..53dabeae7 --- /dev/null +++ b/internal/pkg/bulk/opBulk.go @@ -0,0 +1,265 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +func (b *Bulker) Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { + item, err := b.waitBulkAction(ctx, ActionCreate, index, id, body, opts...) + if err != nil { + return "", err + } + + return item.DocumentID, nil +} + +func (b *Bulker) Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { + item, err := b.waitBulkAction(ctx, ActionIndex, index, id, body, opts...) + if err != nil { + return "", err + } + return item.DocumentID, nil +} + +func (b *Bulker) Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error { + _, err := b.waitBulkAction(ctx, ActionUpdate, index, id, body, opts...) + return err +} + +func (b *Bulker) Delete(ctx context.Context, index, id string, opts ...Opt) error { + _, err := b.waitBulkAction(ctx, ActionDelete, index, id, nil, opts...) + return err +} + +func (b *Bulker) waitBulkAction(ctx context.Context, action actionT, index, id string, body []byte, opts ...Opt) (*BulkIndexerResponseItem, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) 
+ } + + blk := b.newBlk(action, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(len(body) + kSlop) + + if err := b.writeBulkMeta(&blk.buf, action.String(), index, id, opt.RetryOnConflict); err != nil { + return nil, err + } + + if err := b.writeBulkBody(&blk.buf, action, body); err != nil { + return nil, err + } + + // Dispatch and wait for response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + r := resp.data.(*BulkIndexerResponseItem) + return r, nil +} + +func (b *Bulker) writeMget(buf *Buf, index, id string) error { + if err := b.validateMeta(index, id); err != nil { + return err + } + + buf.WriteString(`{"_index":"`) + buf.WriteString(index) + buf.WriteString(`","_id":"`) + buf.WriteString(id) + buf.WriteString(`"},`) + return nil +} + +func (b *Bulker) writeBulkMeta(buf *Buf, action, index, id, retry string) error { + if err := b.validateMeta(index, id); err != nil { + return err + } + + buf.WriteString(`{"`) + buf.WriteString(action) + buf.WriteString(`":{`) + if id != "" { + buf.WriteString(`"_id":"`) + buf.WriteString(id) + buf.WriteString(`",`) + } + if retry != "" { + buf.WriteString(`"retry_on_conflict":`) + buf.WriteString(retry) + buf.WriteString(`,`) + } + + buf.WriteString(`"_index":"`) + buf.WriteString(index) + buf.WriteString("\"}}\n") + + return nil +} + +func (b *Bulker) writeBulkBody(buf *Buf, action actionT, body []byte) error { + if len(body) == 0 { + if action == ActionDelete { + return nil + } + + // Weird to index, create, or update empty, but will allow + buf.WriteString("{}\n") + return nil + } + + if err := b.validateBody(body); err != nil { + return err + } + + buf.Write(body) + buf.WriteRune('\n') + return nil +} + +func (b *Bulker) calcBulkSz(action, idx, id, retry string, body []byte) int { + const kFraming = 19 + metaSz := kFraming + len(action) + len(idx) + + if retry != "" { + metaSz += 21 + len(retry) + } + + var idSz int + if id != "" { + const kIdFraming = 
9 + idSz = kIdFraming + len(id) + } + + var bodySz int + if len(body) != 0 { + const kBodyFraming = 1 + bodySz = kBodyFraming + len(body) + } + + return metaSz + idSz + bodySz +} + +func (b *Bulker) flushBulk(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 200 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending { + bufSz = queue.pending + } + + var buf bytes.Buffer + buf.Grow(bufSz) + + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + queueCnt += 1 + } + + // Do actual bulk request; defer to the client + req := esapi.BulkRequest{ + Body: bytes.NewReader(buf.Bytes()), + } + + if queue.ty == kQueueRefreshBulk { + req.Refresh = "true" + } + + res, err := req.Do(ctx, b.es) + + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Fail BulkRequest req.Do") + return err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.IsError() { + log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail BulkRequest result") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error(). + Err(err). + Str("mod", kModBulk). + Msg("Response error") + return err + } + + var blk bulkIndexerResponse + blk.Items = make([]bulkStubItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error(). + Err(err). + Str("mod", kModBulk). + Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Bool("refresh", queue.ty == kQueueRefreshBulk). + Str("mod", kModBulk). + Int("took", blk.Took). + Dur("rtt", time.Since(start)). + Bool("hasErrors", blk.HasErrors). + Int("cnt", len(blk.Items)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). 
+ Msg("flushBulk") + + if len(blk.Items) != queueCnt { + return fmt.Errorf("Bulk queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. + + n := queue.head + for i, _ := range blk.Items { + next := n.next // 'n' is invalid immediately on channel send + + item := blk.Items[i].Choose() + select { + case n.ch <- respT{ + err: item.deriveError(), + idx: n.idx, + data: item, + }: + default: + panic("Unexpected blocked response channel on flushBulk") + } + + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opMulti.go b/internal/pkg/bulk/opMulti.go new file mode 100644 index 000000000..b4fe72e05 --- /dev/null +++ b/internal/pkg/bulk/opMulti.go @@ -0,0 +1,126 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package bulk
+
+import (
+	"context"
+	"errors"
+	"math"
+)
+
+func (b *Bulker) MCreate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) {
+	return b.multiWaitBulkOp(ctx, ActionCreate, ops, opts...)
+}
+
+func (b *Bulker) MIndex(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) {
+	return b.multiWaitBulkOp(ctx, ActionIndex, ops, opts...)
+}
+
+func (b *Bulker) MUpdate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) {
+	return b.multiWaitBulkOp(ctx, ActionUpdate, ops, opts...)
+}
+
+func (b *Bulker) MDelete(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) {
+	return b.multiWaitBulkOp(ctx, ActionDelete, ops, opts...)
+}
+
+// multiWaitBulkOp serializes a batch of ops, dispatches them to the bulk
+// run loop, and blocks until every response arrives (or ctx is canceled).
+// Returns one response item per op; lastErr reflects any per-item failure.
+func (b *Bulker) multiWaitBulkOp(ctx context.Context, action actionT, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) {
+	if len(ops) == 0 {
+		return nil, nil
+	}
+
+	if uint(len(ops)) > math.MaxUint32 {
+		return nil, errors.New("too many bulk ops")
+	}
+
+	opt := b.parseOpts(opts...)
+
+	// Contract is that consumer never blocks, so must preallocate.
+	// Could consider making the response channel *respT to limit memory usage.
+	ch := make(chan respT, len(ops))
+
+	actionStr := action.String()
+
+	// O(n) Determine how much space we need
+	var byteCnt int
+	for _, op := range ops {
+		byteCnt += b.calcBulkSz(actionStr, op.Index, op.Id, opt.RetryOnConflict, op.Body)
+	}
+
+	// Create one bulk buffer to serialize each piece.
+	// This decreases pressure on the heap. If we calculate wrong,
+	// the Buf object has the property that previously cached slices
+	// are still valid. However, underestimating the buffer size
+	// can lead to multiple copies, which undermines the optimization.
+ var bulkBuf Buf + bulkBuf.Grow(byteCnt) + + // Serialize requests + bulks := make([]bulkT, len(ops)) + for i := range ops { + + bufIdx := bulkBuf.Len() + + op := &ops[i] + + if err := b.writeBulkMeta(&bulkBuf, actionStr, op.Index, op.Id, opt.RetryOnConflict); err != nil { + return nil, err + } + + if err := b.writeBulkBody(&bulkBuf, action, op.Body); err != nil { + return nil, err + } + + bodySlice := bulkBuf.Bytes()[bufIdx:] + + bulk := &bulks[i] + bulk.ch = ch + bulk.idx = int32(i) + bulk.action = action + bulk.buf.Set(bodySlice) + if opt.Refresh { + bulk.flags.Set(flagRefresh) + } + } + + // Dispatch requests + if err := b.multiDispatch(ctx, bulks); err != nil { + return nil, err + } + + // Wait for response and populate return slice + var lastErr error + items := make([]BulkIndexerResponseItem, len(ops)) + + for i := 0; i < len(ops); i++ { + select { + case r := <-ch: + if r.err != nil { + lastErr = r.err + } + if r.data != nil { + items[r.idx] = *r.data.(*BulkIndexerResponseItem) + } + case <-ctx.Done(): + return nil, ctx.Err() + } + } + + return items, lastErr +} + +func (b *Bulker) multiDispatch(ctx context.Context, blks []bulkT) error { + + // Dispatch to bulk Run loop; Iterate by reference. + for i := range blks { + select { + case b.ch <- &blks[i]: + case <-ctx.Done(): + return ctx.Err() + } + } + + return nil +} diff --git a/internal/pkg/bulk/opMulti_integration_test.go b/internal/pkg/bulk/opMulti_integration_test.go new file mode 100644 index 000000000..45d13fefa --- /dev/null +++ b/internal/pkg/bulk/opMulti_integration_test.go @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build integration +// +build integration + +package bulk + +import ( + "context" + "strconv" + "testing" + "time" + + "github.com/rs/zerolog" +) + +// This runs a series of CRUD operations through elastic. +// Not a particularly useful benchmark, but gives some idea of memory overhead. + +func benchmarkMultiUpdate(n int, b *testing.B) { + b.ReportAllocs() + defer (QuietLogger())() + + l := zerolog.GlobalLevel() + defer zerolog.SetGlobalLevel(l) + + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n), WithFlushInterval(time.Millisecond*10)) + + // Create N samples + var ops []MultiOp + for i := 0; i < n; i++ { + sample := NewRandomSample() + ops = append(ops, MultiOp{ + Index: index, + Body: sample.marshal(b), + }) + } + + items, err := bulker.MCreate(ctx, ops) + if err != nil { + b.Fatal(err) + } + + for j := 0; j < b.N; j++ { + fields := UpdateFields{ + "dateval": time.Now().Format(time.RFC3339), + } + + body, err := fields.Marshal() + if err != nil { + b.Fatal(err) + } + + for i := range ops { + ops[i].Id = items[i].DocumentID + ops[i].Body = body + } + + _, err = bulker.MUpdate(ctx, ops) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkMultiUpdate(b *testing.B) { + + benchmarks := []int{1, 64, 8192, 37268, 131072} + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkMultiUpdate(n, b) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} diff --git a/internal/pkg/bulk/opMulti_test.go b/internal/pkg/bulk/opMulti_test.go new file mode 100644 index 000000000..26fca3252 --- /dev/null +++ b/internal/pkg/bulk/opMulti_test.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + "strconv" + "testing" +) + +const payload = `{"_id" : "1", "_index" : "test"}` + +// Test throughput of creating multiOps +func BenchmarkMultiUpdateMock(b *testing.B) { + defer (QuietLogger())() + + // Allocate, but don't run. Stub the client. + bulker := NewBulker(nil) + defer close(bulker.ch) + + go func() { + for v := range bulker.ch { + v.ch <- respT{nil, v.idx, nil} + } + }() + + body := []byte(payload) + + benchmarks := []int{1, 8, 64, 4096, 32768, 131072} + + // Create the samples outside the loop to avoid accounting + max := 0 + for _, v := range benchmarks { + if max < v { + max = v + } + } + + // Create the ops + ops := make([]MultiOp, 0, max) + for i := 0; i < max; i++ { + ops = append(ops, MultiOp{ + Id: "abba", + Index: "bogus", + Body: body, + }) + } + + for _, n := range benchmarks { + b.Run(strconv.Itoa(n), func(b *testing.B) { + b.ReportAllocs() + ctx := context.Background() + for i := 0; i < b.N; i++ { + bulker.MUpdate(ctx, ops[:n]) + } + }) + } + +} diff --git a/internal/pkg/bulk/opRead.go b/internal/pkg/bulk/opRead.go new file mode 100644 index 000000000..6c6de1255 --- /dev/null +++ b/internal/pkg/bulk/opRead.go @@ -0,0 +1,155 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +const ( + rPrefix = "{\"docs\": [" + rSuffix = "]}" +) + +func (b *Bulker) Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) 
+ } + + blk := b.newBlk(ActionRead, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(kSlop) + + if err := b.writeMget(&blk.buf, index, id); err != nil { + return nil, err + } + + // Process response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + // Interpret response, looking for generated id + r := resp.data.(*MgetResponseItem) + return r.Source, nil +} + +func (b *Bulker) flushRead(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 256 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending+len(rSuffix) { + bufSz = queue.pending + len(rSuffix) + } + + buf := bytes.NewBufferString(rPrefix) + buf.Grow(bufSz) + + // Each item a JSON array element followed by comma + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + queueCnt += 1 + } + + // Need to strip the last element and append the suffix + payload := buf.Bytes() + payload = append(payload[:len(payload)-1], []byte(rSuffix)...) 
+ + // Do actual bulk request; and send response on chan + req := esapi.MgetRequest{ + Body: bytes.NewReader(payload), + } + + var refresh bool + if queue.ty == kQueueRefreshRead { + refresh = true + req.Refresh = &refresh + } + + res, err := req.Do(ctx, b.es) + + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Error sending mget request to Elasticsearch") + return err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.IsError() { + log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Error in mget request result to Elasticsearch") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Response error") + } + + // prealloc slice + var blk MgetResponse + blk.Items = make([]MgetResponseItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Bool("refresh", refresh). + Str("mod", kModBulk). + Dur("rtt", time.Since(start)). + Int("cnt", len(blk.Items)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). + Msg("flushRead") + + if len(blk.Items) != queueCnt { + return fmt.Errorf("Mget queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. 
+ + n := queue.head + for i := range blk.Items { + next := n.next // 'n' is invalid immediately on channel send + item := &blk.Items[i] + select { + case n.ch <- respT{ + err: item.deriveError(), + idx: n.idx, + data: item, + }: + default: + panic("Unexpected blocked response channel on flushRead") + } + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opSearch.go b/internal/pkg/bulk/opSearch.go new file mode 100644 index 000000000..e0d7b341f --- /dev/null +++ b/internal/pkg/bulk/opSearch.go @@ -0,0 +1,185 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +func (b *Bulker) Search(ctx context.Context, index string, body []byte, opts ...Opt) (*es.ResultT, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) 
+ } + + blk := b.newBlk(ActionSearch, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(len(body) + kSlop) + + if err := b.writeMsearchMeta(&blk.buf, index, opt.Indices); err != nil { + return nil, err + } + + if err := b.writeMsearchBody(&blk.buf, body); err != nil { + return nil, err + } + + // Process response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + // Interpret response + r := resp.data.(*MsearchResponseItem) + return &es.ResultT{HitsT: r.Hits, Aggregations: r.Aggregations}, nil +} + +func (b *Bulker) writeMsearchMeta(buf *Buf, index string, moreIndices []string) error { + if err := b.validateIndex(index); err != nil { + return err + } + + if len(moreIndices) > 0 { + if err := b.validateIndices(moreIndices); err != nil { + return err + } + + indices := []string{index} + indices = append(indices, moreIndices...) + + buf.WriteString(`{"index": `) + if d, err := json.Marshal(indices); err != nil { + return err + } else { + buf.Write(d) + } + buf.WriteString("}\n") + } else if len(index) == 0 { + buf.WriteString("{ }\n") + } else { + buf.WriteString(`{"index": "`) + buf.WriteString(index) + buf.WriteString("\"}\n") + } + + return nil +} + +func (b *Bulker) writeMsearchBody(buf *Buf, body []byte) error { + buf.Write(body) + buf.WriteRune('\n') + + return b.validateBody(body) +} + +func (b *Bulker) flushSearch(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 256 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending { + bufSz = queue.pending + } + + var buf bytes.Buffer + buf.Grow(bufSz) + + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + + queueCnt += 1 + } + + // Do actual bulk request; and send response on chan + req := esapi.MsearchRequest{ + Body: bytes.NewReader(buf.Bytes()), + } + res, err := req.Do(ctx, b.es) + + if err != nil { + return err + } + + if res.Body != nil { + 
defer res.Body.Close() + } + + if res.IsError() { + log.Error().Err(err).Str("mod", kModBulk).Msg("Fail writeMsearchBody") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("MsearchResponse error") + return err + } + + // prealloc slice + var blk MsearchResponse + blk.Responses = make([]MsearchResponseItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Str("mod", kModBulk). + Dur("rtt", time.Since(start)). + Int("took", blk.Took). + Int("cnt", len(blk.Responses)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). + Msg("flushSearch") + + if len(blk.Responses) != queueCnt { + return fmt.Errorf("Bulk queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. 
+ + n := queue.head + for i := range blk.Responses { + next := n.next // 'n' is invalid immediately on channel send + + response := &blk.Responses[i] + + select { + case n.ch <- respT{ + err: response.deriveError(), + idx: n.idx, + data: response, + }: + default: + panic("Unexpected blocked response channel on flushSearch") + } + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index c2d1896e5..f7bbc4cd9 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -5,14 +5,20 @@ package bulk import ( + "github.com/rs/zerolog" + "strconv" "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/config" ) //----- // Transaction options type optionsT struct { - Refresh bool + Refresh bool + RetryOnConflict string + Indices []string } type Opt func(*optionsT) @@ -23,6 +29,19 @@ func WithRefresh() Opt { } } +func WithRetryOnConflict(n int) Opt { + return func(opt *optionsT) { + opt.RetryOnConflict = strconv.Itoa(n) + } +} + +// Applicable to search +func WithIndex(idx string) Opt { + return func(opt *optionsT) { + opt.Indices = append(opt.Indices, idx) + } +} + //----- // Bulk API options @@ -31,6 +50,8 @@ type bulkOptT struct { flushThresholdCnt int flushThresholdSz int maxPending int + blockQueueSz int + apikeyMaxParallel int } type BulkOpt func(*bulkOptT) @@ -49,7 +70,7 @@ func WithFlushThresholdCount(cnt int) BulkOpt { } } -// Cummulative size of pending transactions that will force flush before interval +// Cummulative size in bytes of pending transactions that will force flush before interval func WithFlushThresholdSize(sz int) BulkOpt { return func(opt *bulkOptT) { opt.flushThresholdSz = sz @@ -62,3 +83,63 @@ func WithMaxPending(max int) BulkOpt { opt.maxPending = max } } + +// Size of internal block queue (ie. 
channel) +func WithBlockQueueSize(sz int) BulkOpt { + return func(opt *bulkOptT) { + opt.blockQueueSz = sz + } +} + +// Max number of api key operations outstanding +func WithApiKeyMaxParallel(max int) BulkOpt { + return func(opt *bulkOptT) { + opt.apikeyMaxParallel = max + } +} + +func parseBulkOpts(opts ...BulkOpt) bulkOptT { + bopt := bulkOptT{ + flushInterval: defaultFlushInterval, + flushThresholdCnt: defaultFlushThresholdCnt, + flushThresholdSz: defaultFlushThresholdSz, + maxPending: defaultMaxPending, + apikeyMaxParallel: defaultApiKeyMaxParallel, + blockQueueSz: defaultBlockQueueSz, + } + + for _, f := range opts { + f(&bopt) + } + + return bopt +} + +func (o *bulkOptT) MarshalZerologObject(e *zerolog.Event) { + e.Dur("flushInterval", o.flushInterval) + e.Int("flushThresholdCnt", o.flushThresholdCnt) + e.Int("flushThresholdSz", o.flushThresholdSz) + e.Int("maxPending", o.maxPending) + e.Int("blockQueueSz", o.blockQueueSz) + e.Int("apikeyMaxParallel", o.apikeyMaxParallel) +} + +// Bridge to configuration subsystem +func BulkOptsFromCfg(cfg *config.Config) []BulkOpt { + + bulkCfg := cfg.Inputs[0].Server.Bulk + + // Attempt to slice the max number of connections to leave room for the bulk flush queues + maxKeyParallel := cfg.Output.Elasticsearch.MaxConnPerHost + if cfg.Output.Elasticsearch.MaxConnPerHost > bulkCfg.FlushMaxPending { + maxKeyParallel = cfg.Output.Elasticsearch.MaxConnPerHost - bulkCfg.FlushMaxPending + } + + return []BulkOpt{ + WithFlushInterval(bulkCfg.FlushInterval), + WithFlushThresholdCount(bulkCfg.FlushThresholdCount), + WithFlushThresholdSize(bulkCfg.FlushThresholdSize), + WithMaxPending(bulkCfg.FlushMaxPending), + WithApiKeyMaxParallel(maxKeyParallel), + } +} diff --git a/internal/pkg/bulk/queue.go b/internal/pkg/bulk/queue.go new file mode 100644 index 000000000..60f7b0bee --- /dev/null +++ b/internal/pkg/bulk/queue.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +type queueT struct { + ty queueType + cnt int + head *bulkT + pending int +} + +type queueType int + +const ( + kQueueBulk queueType = iota + kQueueRead + kQueueSearch + kQueueRefreshBulk + kQueueRefreshRead + kNumQueues +) + +func (q queueT) Type() string { + switch q.ty { + case kQueueBulk: + return "bulk" + case kQueueRead: + return "read" + case kQueueSearch: + return "search" + case kQueueRefreshBulk: + return "refreshBulk" + case kQueueRefreshRead: + return "refreshRead" + } + panic("unknown") +} diff --git a/internal/pkg/bulk/schema.go b/internal/pkg/bulk/schema.go index bd6df93dd..fe68c9717 100644 --- a/internal/pkg/bulk/schema.go +++ b/internal/pkg/bulk/schema.go @@ -6,13 +6,37 @@ package bulk import ( "encoding/json" + "errors" "github.com/elastic/fleet-server/v7/internal/pkg/es" ) -type BulkIndexerResponse struct { - Took int `json:"took"` - HasErrors bool `json:"errors"` - Items []map[string]BulkIndexerResponseItem `json:"items,omitempty"` +type bulkStubItem struct { + Index *BulkIndexerResponseItem `json:"index"` + Delete *BulkIndexerResponseItem `json:"delete"` + Create *BulkIndexerResponseItem `json:"create"` + Update *BulkIndexerResponseItem `json:"update"` +} + +func (bi bulkStubItem) Choose() *BulkIndexerResponseItem { + switch { + case bi.Update != nil: + return bi.Update + case bi.Create != nil: + return bi.Create + case bi.Index != nil: + return bi.Index + case bi.Delete != nil: + return bi.Delete + } + + return nil +} + +//easyjson:json +type bulkIndexerResponse struct { + Took int `json:"took"` + HasErrors bool `json:"errors"` + Items []bulkStubItem `json:"items,omitempty"` } // Comment out fields we don't use; no point decoding. 
@@ -31,9 +55,18 @@ type BulkIndexerResponseItem struct { // Failed int `json:"failed"` // } `json:"_shards"` - Error es.ErrorT `json:"error,omitempty"` + Error *es.ErrorT `json:"error,omitempty"` } +func (b *BulkIndexerResponseItem) deriveError() error { + if b == nil { + return errors.New("Unknown bulk operator") + } + + return es.TranslateError(b.Status, b.Error) +} + +//easyjson:json type MgetResponse struct { Items []MgetResponseItem `json:"docs"` } @@ -72,18 +105,15 @@ type MsearchResponseItem struct { Hits es.HitsT `json:"hits"` Aggregations map[string]es.Aggregation `json:"aggregations,omitempty"` - Error es.ErrorT `json:"error,omitempty"` + Error *es.ErrorT `json:"error,omitempty"` } +//easyjson:json type MsearchResponse struct { Responses []MsearchResponseItem `json:"responses"` Took int `json:"took"` } -func (b *BulkIndexerResponseItem) deriveError() error { - return es.TranslateError(b.Status, b.Error) -} - func (b *MsearchResponseItem) deriveError() error { return es.TranslateError(b.Status, b.Error) } diff --git a/internal/pkg/bulk/schema_easyjson.go b/internal/pkg/bulk/schema_easyjson.go new file mode 100644 index 000000000..7b1dd1bd2 --- /dev/null +++ b/internal/pkg/bulk/schema_easyjson.go @@ -0,0 +1,1261 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
+ +package bulk + +import ( + json "encoding/json" + es "github.com/elastic/fleet-server/v7/internal/pkg/es" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(in *jlexer.Lexer, out *bulkIndexerResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "took": + out.Took = int(in.Int()) + case "errors": + out.HasErrors = bool(in.Bool()) + case "items": + if in.IsNull() { + in.Skip() + out.Items = nil + } else { + in.Delim('[') + if out.Items == nil { + if !in.IsDelim(']') { + out.Items = make([]bulkStubItem, 0, 2) + } else { + out.Items = []bulkStubItem{} + } + } else { + out.Items = (out.Items)[:0] + } + for !in.IsDelim(']') { + var v1 bulkStubItem + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk1(in, &v1) + out.Items = append(out.Items, v1) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(out *jwriter.Writer, in bulkIndexerResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"took\":" + out.RawString(prefix[1:]) + out.Int(int(in.Took)) + } + { + const prefix string = ",\"errors\":" + out.RawString(prefix) + out.Bool(bool(in.HasErrors)) + } + if len(in.Items) != 0 { + const prefix string = ",\"items\":" + out.RawString(prefix) + { + out.RawByte('[') + for v2, v3 := range in.Items { + if v2 > 0 { + 
out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk1(out, v3) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v bulkIndexerResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v bulkIndexerResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *bulkIndexerResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *bulkIndexerResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk1(in *jlexer.Lexer, out *bulkStubItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "index": + if in.IsNull() { + in.Skip() + out.Index = nil + } else { + if out.Index == nil { + out.Index = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Index) + } + case "delete": + if in.IsNull() { + in.Skip() + out.Delete = nil + } else { + if out.Delete == nil { + out.Delete = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Delete) + } + case "create": + if in.IsNull() { + in.Skip() + 
out.Create = nil + } else { + if out.Create == nil { + out.Create = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Create) + } + case "update": + if in.IsNull() { + in.Skip() + out.Update = nil + } else { + if out.Update == nil { + out.Update = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Update) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk1(out *jwriter.Writer, in bulkStubItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"index\":" + out.RawString(prefix[1:]) + if in.Index == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Index) + } + } + { + const prefix string = ",\"delete\":" + out.RawString(prefix) + if in.Delete == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Delete) + } + } + { + const prefix string = ",\"create\":" + out.RawString(prefix) + if in.Create == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Create) + } + } + { + const prefix string = ",\"update\":" + out.RawString(prefix) + if in.Update == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Update) + } + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in *jlexer.Lexer, out *BulkIndexerResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + 
continue + } + switch key { + case "_id": + out.DocumentID = string(in.String()) + case "status": + out.Status = int(in.Int()) + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(es.ErrorT) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in, out.Error) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out *jwriter.Writer, in BulkIndexerResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"_id\":" + out.RawString(prefix[1:]) + out.String(string(in.DocumentID)) + } + { + const prefix string = ",\"status\":" + out.RawString(prefix) + out.Int(int(in.Status)) + } + if in.Error != nil { + const prefix string = ",\"error\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out, *in.Error) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in *jlexer.Lexer, out *es.ErrorT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "reason": + out.Reason = string(in.String()) + case "caused_by": + easyjsonCef4e921Decode(in, &out.Cause) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out *jwriter.Writer, in es.ErrorT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"reason\":" + 
out.RawString(prefix) + out.String(string(in.Reason)) + } + { + const prefix string = ",\"caused_by\":" + out.RawString(prefix) + easyjsonCef4e921Encode(out, in.Cause) + } + out.RawByte('}') +} +func easyjsonCef4e921Decode(in *jlexer.Lexer, out *struct { + Type string `json:"type"` + Reason string `json:"reason"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "reason": + out.Reason = string(in.String()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode(out *jwriter.Writer, in struct { + Type string `json:"type"` + Reason string `json:"reason"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"reason\":" + out.RawString(prefix) + out.String(string(in.Reason)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(in *jlexer.Lexer, out *MsearchResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "responses": + if in.IsNull() { + in.Skip() + out.Responses = nil + } else { + in.Delim('[') + if out.Responses == nil { + if !in.IsDelim(']') { + out.Responses = make([]MsearchResponseItem, 0, 0) + } else { + out.Responses = []MsearchResponseItem{} + } + } else { + out.Responses = (out.Responses)[:0] + } + for !in.IsDelim(']') { + var v4 MsearchResponseItem + 
easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk4(in, &v4) + out.Responses = append(out.Responses, v4) + in.WantComma() + } + in.Delim(']') + } + case "took": + out.Took = int(in.Int()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(out *jwriter.Writer, in MsearchResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"responses\":" + out.RawString(prefix[1:]) + if in.Responses == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v5, v6 := range in.Responses { + if v5 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk4(out, v6) + } + out.RawByte(']') + } + } + { + const prefix string = ",\"took\":" + out.RawString(prefix) + out.Int(int(in.Took)) + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v MsearchResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v MsearchResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *MsearchResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *MsearchResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk4(in *jlexer.Lexer, out 
*MsearchResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "status": + out.Status = int(in.Int()) + case "took": + out.Took = uint64(in.Uint64()) + case "timed_out": + out.TimedOut = bool(in.Bool()) + case "_shards": + easyjsonCef4e921Decode1(in, &out.Shards) + case "hits": + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs1(in, &out.Hits) + case "aggregations": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + if !in.IsDelim('}') { + out.Aggregations = make(map[string]es.Aggregation) + } else { + out.Aggregations = nil + } + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v7 es.Aggregation + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs2(in, &v7) + (out.Aggregations)[key] = v7 + in.WantComma() + } + in.Delim('}') + } + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(es.ErrorT) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in, out.Error) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk4(out *jwriter.Writer, in MsearchResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"status\":" + out.RawString(prefix[1:]) + out.Int(int(in.Status)) + } + { + const prefix string = ",\"took\":" + out.RawString(prefix) + out.Uint64(uint64(in.Took)) + } + { + const prefix string = ",\"timed_out\":" + out.RawString(prefix) + out.Bool(bool(in.TimedOut)) + } + { + const prefix string = ",\"_shards\":" + out.RawString(prefix) + easyjsonCef4e921Encode1(out, in.Shards) + } + { + const prefix string 
= ",\"hits\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs1(out, in.Hits) + } + if len(in.Aggregations) != 0 { + const prefix string = ",\"aggregations\":" + out.RawString(prefix) + { + out.RawByte('{') + v8First := true + for v8Name, v8Value := range in.Aggregations { + if v8First { + v8First = false + } else { + out.RawByte(',') + } + out.String(string(v8Name)) + out.RawByte(':') + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs2(out, v8Value) + } + out.RawByte('}') + } + } + if in.Error != nil { + const prefix string = ",\"error\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out, *in.Error) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs2(in *jlexer.Lexer, out *es.Aggregation) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "value": + out.Value = float64(in.Float64()) + case "doc_count_error_upper_bound": + out.DocCountErrorUpperBound = int64(in.Int64()) + case "sum_other_doc_count": + out.SumOtherDocCount = int64(in.Int64()) + case "buckets": + if in.IsNull() { + in.Skip() + out.Buckets = nil + } else { + in.Delim('[') + if out.Buckets == nil { + if !in.IsDelim(']') { + out.Buckets = make([]es.Bucket, 0, 2) + } else { + out.Buckets = []es.Bucket{} + } + } else { + out.Buckets = (out.Buckets)[:0] + } + for !in.IsDelim(']') { + var v9 es.Bucket + if data := in.Raw(); in.Ok() { + in.AddError((v9).UnmarshalJSON(data)) + } + out.Buckets = append(out.Buckets, v9) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func 
easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs2(out *jwriter.Writer, in es.Aggregation) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"value\":" + out.RawString(prefix[1:]) + out.Float64(float64(in.Value)) + } + { + const prefix string = ",\"doc_count_error_upper_bound\":" + out.RawString(prefix) + out.Int64(int64(in.DocCountErrorUpperBound)) + } + { + const prefix string = ",\"sum_other_doc_count\":" + out.RawString(prefix) + out.Int64(int64(in.SumOtherDocCount)) + } + if len(in.Buckets) != 0 { + const prefix string = ",\"buckets\":" + out.RawString(prefix) + { + out.RawByte('[') + for v10, v11 := range in.Buckets { + if v10 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs3(out, v11) + } + out.RawByte(']') + } + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs3(in *jlexer.Lexer, out *es.Bucket) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + out.Key = string(in.String()) + case "doc_count": + out.DocCount = int64(in.Int64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs3(out *jwriter.Writer, in es.Bucket) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"key\":" + out.RawString(prefix[1:]) + out.String(string(in.Key)) + } + { + const prefix string = ",\"doc_count\":" + out.RawString(prefix) + out.Int64(int64(in.DocCount)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs1(in *jlexer.Lexer, out *es.HitsT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if 
isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "hits": + if in.IsNull() { + in.Skip() + out.Hits = nil + } else { + in.Delim('[') + if out.Hits == nil { + if !in.IsDelim(']') { + out.Hits = make([]es.HitT, 0, 0) + } else { + out.Hits = []es.HitT{} + } + } else { + out.Hits = (out.Hits)[:0] + } + for !in.IsDelim(']') { + var v12 es.HitT + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs4(in, &v12) + out.Hits = append(out.Hits, v12) + in.WantComma() + } + in.Delim(']') + } + case "total": + easyjsonCef4e921Decode2(in, &out.Total) + case "max_score": + if in.IsNull() { + in.Skip() + out.MaxScore = nil + } else { + if out.MaxScore == nil { + out.MaxScore = new(float64) + } + *out.MaxScore = float64(in.Float64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs1(out *jwriter.Writer, in es.HitsT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"hits\":" + out.RawString(prefix[1:]) + if in.Hits == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v13, v14 := range in.Hits { + if v13 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs4(out, v14) + } + out.RawByte(']') + } + } + { + const prefix string = ",\"total\":" + out.RawString(prefix) + easyjsonCef4e921Encode2(out, in.Total) + } + { + const prefix string = ",\"max_score\":" + out.RawString(prefix) + if in.MaxScore == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.MaxScore)) + } + } + out.RawByte('}') +} +func easyjsonCef4e921Decode2(in *jlexer.Lexer, out *struct { + Relation string `json:"relation"` + Value uint64 
`json:"value"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "relation": + out.Relation = string(in.String()) + case "value": + out.Value = uint64(in.Uint64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode2(out *jwriter.Writer, in struct { + Relation string `json:"relation"` + Value uint64 `json:"value"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"relation\":" + out.RawString(prefix[1:]) + out.String(string(in.Relation)) + } + { + const prefix string = ",\"value\":" + out.RawString(prefix) + out.Uint64(uint64(in.Value)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs4(in *jlexer.Lexer, out *es.HitT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "_id": + out.Id = string(in.String()) + case "_seq_no": + out.SeqNo = int64(in.Int64()) + case "version": + out.Version = int64(in.Int64()) + case "_index": + out.Index = string(in.String()) + case "_source": + if data := in.Raw(); in.Ok() { + in.AddError((out.Source).UnmarshalJSON(data)) + } + case "_score": + if in.IsNull() { + in.Skip() + out.Score = nil + } else { + if out.Score == nil { + out.Score = new(float64) + } + *out.Score = float64(in.Float64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs4(out 
*jwriter.Writer, in es.HitT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"_id\":" + out.RawString(prefix[1:]) + out.String(string(in.Id)) + } + { + const prefix string = ",\"_seq_no\":" + out.RawString(prefix) + out.Int64(int64(in.SeqNo)) + } + { + const prefix string = ",\"version\":" + out.RawString(prefix) + out.Int64(int64(in.Version)) + } + { + const prefix string = ",\"_index\":" + out.RawString(prefix) + out.String(string(in.Index)) + } + { + const prefix string = ",\"_source\":" + out.RawString(prefix) + out.Raw((in.Source).MarshalJSON()) + } + { + const prefix string = ",\"_score\":" + out.RawString(prefix) + if in.Score == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.Score)) + } + } + out.RawByte('}') +} +func easyjsonCef4e921Decode1(in *jlexer.Lexer, out *struct { + Total uint64 `json:"total"` + Successful uint64 `json:"successful"` + Skipped uint64 `json:"skipped"` + Failed uint64 `json:"failed"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "total": + out.Total = uint64(in.Uint64()) + case "successful": + out.Successful = uint64(in.Uint64()) + case "skipped": + out.Skipped = uint64(in.Uint64()) + case "failed": + out.Failed = uint64(in.Uint64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode1(out *jwriter.Writer, in struct { + Total uint64 `json:"total"` + Successful uint64 `json:"successful"` + Skipped uint64 `json:"skipped"` + Failed uint64 `json:"failed"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"total\":" + out.RawString(prefix[1:]) + out.Uint64(uint64(in.Total)) + } + { + const prefix string = 
",\"successful\":" + out.RawString(prefix) + out.Uint64(uint64(in.Successful)) + } + { + const prefix string = ",\"skipped\":" + out.RawString(prefix) + out.Uint64(uint64(in.Skipped)) + } + { + const prefix string = ",\"failed\":" + out.RawString(prefix) + out.Uint64(uint64(in.Failed)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(in *jlexer.Lexer, out *MgetResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "docs": + if in.IsNull() { + in.Skip() + out.Items = nil + } else { + in.Delim('[') + if out.Items == nil { + if !in.IsDelim(']') { + out.Items = make([]MgetResponseItem, 0, 2) + } else { + out.Items = []MgetResponseItem{} + } + } else { + out.Items = (out.Items)[:0] + } + for !in.IsDelim(']') { + var v15 MgetResponseItem + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk6(in, &v15) + out.Items = append(out.Items, v15) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(out *jwriter.Writer, in MgetResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"docs\":" + out.RawString(prefix[1:]) + if in.Items == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v16, v17 := range in.Items { + if v16 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk6(out, v17) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v MgetResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} 
+ easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v MgetResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *MgetResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *MgetResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk6(in *jlexer.Lexer, out *MgetResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "found": + out.Found = bool(in.Bool()) + case "_source": + if data := in.Raw(); in.Ok() { + in.AddError((out.Source).UnmarshalJSON(data)) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk6(out *jwriter.Writer, in MgetResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"found\":" + out.RawString(prefix[1:]) + out.Bool(bool(in.Found)) + } + { + const prefix string = ",\"_source\":" + out.RawString(prefix) + out.Raw((in.Source).MarshalJSON()) + } + out.RawByte('}') +} diff --git a/internal/pkg/bulk/setup_test.go b/internal/pkg/bulk/setup_test.go new file mode 100644 index 000000000..ff94da5ee --- /dev/null +++ 
b/internal/pkg/bulk/setup_test.go @@ -0,0 +1,175 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + "encoding/base64" + "encoding/json" + "math/rand" + "testing" + "time" + + "github.com/elastic/go-ucfg/yaml" + "github.com/rs/xid" + + "github.com/Pallinder/go-randomdata" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" + "github.com/rs/zerolog" +) + +var defaultCfg config.Config +var defaultCfgData = []byte(` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +`) + +const testPolicy = `{ + "properties": { + "intval": { + "type": "integer" + }, + "objval": { + "type": "object" + }, + "boolval": { + "type": "boolean" + }, + "kwval": { + "type": "keyword" + }, + "binaryval": { + "type": "binary" + }, + "dateval": { + "type": "date" + } + } +}` + +type subT struct { + SubString string `json:"substring"` +} + +type testT struct { + IntVal int `json:"intval"` + ObjVal subT `json:"objval"` + BoolVal bool `json:"boolval"` + KWVal string `json:"kwval"` + BinaryVal string `json:"binaryval"` + DateVal string `json:"dateval"` +} + +func NewRandomSample() testT { + + return testT{ + IntVal: int(rand.Int31()), + ObjVal: subT{SubString: randomdata.SillyName()}, + BoolVal: (rand.Intn(1) == 1), + KWVal: randomdata.SillyName(), + BinaryVal: base64.StdEncoding.EncodeToString([]byte(randomdata.SillyName())), + DateVal: time.Now().Format(time.RFC3339), + } +} + +func (ts testT) marshal(t testing.TB) []byte { + data, err := 
json.Marshal(&ts) + if err != nil { + t.Fatal(err) + } + return data +} + +func (ts *testT) read(t testing.TB, bulker Bulk, ctx context.Context, index, id string) { + data, err := bulker.Read(ctx, index, id) + if err != nil { + t.Fatal(err) + } + + err = json.Unmarshal(data, ts) + if err != nil { + t.Fatal(err) + } +} + +func init() { + c, err := yaml.NewConfig(defaultCfgData, config.DefaultOptions...) + if err != nil { + panic(err) + } + err = c.Unpack(&defaultCfg, config.DefaultOptions...) + if err != nil { + panic(err) + } +} + +func SetupBulk(ctx context.Context, t testing.TB, opts ...BulkOpt) Bulk { + t.Helper() + + cli, err := es.NewClient(ctx, &defaultCfg, false) + if err != nil { + t.Fatal(err) + } + + opts = append(opts, BulkOptsFromCfg(&defaultCfg)...) + + bulker := NewBulker(cli, opts...) + go bulker.Run(ctx) + + return bulker +} + +func SetupIndex(ctx context.Context, t testing.TB, bulker Bulk, mapping string) string { + t.Helper() + index := xid.New().String() + err := esutil.EnsureIndex(ctx, bulker.Client(), index, mapping) + if err != nil { + t.Fatal(err) + } + return index +} + +func SetupIndexWithBulk(ctx context.Context, t testing.TB, mapping string, opts ...BulkOpt) (string, Bulk) { + t.Helper() + bulker := SetupBulk(ctx, t, opts...) 
+ index := SetupIndex(ctx, t, bulker, mapping) + return index, bulker +} + +func QuietLogger() func() { + l := zerolog.GlobalLevel() + + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + return func() { + zerolog.SetGlobalLevel(l) + } +} + +func EqualElastic(werr, gerr error) bool { + if werr == gerr { + return true + } + + wantErr, ok1 := werr.(es.ErrElastic) + gotErr, ok2 := gerr.(*es.ErrElastic) + + if !ok2 { + if tryAgain, ok3 := gerr.(es.ErrElastic); ok3 { + gotErr = &tryAgain + ok2 = true + } + } + + return (ok1 && ok2 && wantErr.Status == gotErr.Status && wantErr.Type == gotErr.Type) +} diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index b03bb4410..52e676ab8 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -5,20 +5,61 @@ package cache import ( + "fmt" + "math/rand" + "sync" "time" - "github.com/dgraph-io/ristretto" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/model" ) +type Cache interface { + Reconfigure(Config) error + + SetAction(model.Action) + GetAction(id string) (model.Action, bool) + + SetApiKey(key ApiKey, enabled bool) + ValidApiKey(key ApiKey) bool + + SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64) + GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) + + SetArtifact(artifact model.Artifact) + GetArtifact(ident, sha2 string) (model.Artifact, bool) +} + type ApiKey = apikey.ApiKey type SecurityInfo = apikey.SecurityInfo -type Cache struct { - cache *ristretto.Cache +type CacheT struct { + cache Cacher + cfg Config + mut sync.RWMutex +} + +type Config struct { + NumCounters int64 // number of keys to track frequency of + MaxCost int64 // maximum cost of cache in 'cost' units + ActionTTL time.Duration + ApiKeyTTL time.Duration + EnrollKeyTTL time.Duration + ArtifactTTL time.Duration + ApiKeyJitter time.Duration +} + +func (c *Config) 
MarshalZerologObject(e *zerolog.Event) { + e.Int64("numCounters", c.NumCounters) + e.Int64("maxCost", c.MaxCost) + e.Dur("actionTTL", c.ActionTTL) + e.Dur("enrollTTL", c.EnrollKeyTTL) + e.Dur("artifactTTL", c.ArtifactTTL) + e.Dur("apiKeyTTL", c.ApiKeyTTL) + e.Dur("apiKeyJitter", c.ApiKeyJitter) } type actionCache struct { @@ -27,29 +68,55 @@ type actionCache struct { } // New creates a new cache. -func New() (Cache, error) { - cfg := &ristretto.Config{ - NumCounters: 1000000, // number of keys to track frequency of - MaxCost: 100 * 1024 * 1024, // maximum cost of cache (100MB) - BufferItems: 64, +func New(cfg Config) (*CacheT, error) { + cache, err := newCache(cfg) + if err != nil { + return nil, err } - cache, err := ristretto.NewCache(cfg) - return Cache{cache}, err + c := CacheT{ + cache: cache, + cfg: cfg, + } + + return &c, nil +} + +// Reconfigure will drop cache +func (c *CacheT) Reconfigure(cfg Config) error { + c.mut.Lock() + defer c.mut.Unlock() + + cache, err := newCache(cfg) + if err != nil { + return err + } + + // Close down previous cache + c.cache.Close() + + // And assign new one + c.cfg = cfg + c.cache = cache + return nil } // SetAction sets an action in the cache. // // This will only cache the action ID and action Type. So `GetAction` will only // return a `model.Action` with `ActionId` and `Type` set. -func (c Cache) SetAction(action model.Action) { +func (c *CacheT) SetAction(action model.Action) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "action:" + action.ActionId v := actionCache{ actionId: action.ActionId, actionType: action.Type, } cost := len(action.ActionId) + len(action.Type) - ok := c.cache.Set(scopedKey, v, int64(cost)) + ttl := c.cfg.ActionTTL + ok := c.cache.SetWithTTL(scopedKey, v, int64(cost), ttl) log.Trace(). Bool("ok", ok). Str("id", action.ActionId). @@ -61,7 +128,10 @@ func (c Cache) SetAction(action model.Action) { // // This will only return a `model.Action` with the action ID and action Type set. 
// This is because `SetAction` So `GetAction` will only cache the action ID and action Type.
-func (c Cache) GetAction(id string) (model.Action, bool) {
+func (c *CacheT) GetAction(id string) (model.Action, bool) {
+	c.mut.RLock()
+	defer c.mut.RUnlock()
+
 	scopedKey := "action:" + id
 	if v, ok := c.cache.Get(scopedKey); ok {
 		log.Trace().Str("id", id).Msg("Action cache HIT")
@@ -81,12 +151,36 @@ func (c Cache) GetAction(id string) (model.Action, bool) {
 }
 
 // SetApiKey sets the API key in the cache.
-func (c Cache) SetApiKey(key ApiKey, ttl time.Duration) {
+func (c *CacheT) SetApiKey(key ApiKey, enabled bool) {
+	c.mut.RLock()
+	defer c.mut.RUnlock()
+
 	scopedKey := "api:" + key.Id
-	cost := len(scopedKey) + len(key.Key)
-	ok := c.cache.SetWithTTL(scopedKey, key.Key, int64(cost), ttl)
+
+	// Use the valid key as the payload of the record;
+	// If caller has marked key as not enabled, use empty string.
+	val := key.Key
+	if !enabled {
+		val = ""
+	}
+
+	// If enabled, jitter allows us to randomize the expiration of the artifact
+	// across time, which is helpful if a bunch of agents came on at the same time,
+	// say during a network restoration. With some jitter, we avoid having to
+	// revalidate the API Keys all at the same time, which we know causes load on Elastic.
+	ttl := c.cfg.ApiKeyTTL
+	if c.cfg.ApiKeyJitter != 0 {
+		jitter := time.Duration(rand.Int63n(int64(c.cfg.ApiKeyJitter)))
+		if jitter < ttl {
+			ttl = ttl - jitter
+		}
+	}
+
+	cost := len(scopedKey) + len(val)
+	ok := c.cache.SetWithTTL(scopedKey, val, int64(cost), ttl)
 	log.Trace().
 		Bool("ok", ok).
+		Bool("enabled", enabled).
 		Str("key", key.Id).
 		Dur("ttl", ttl).
 		Int("cost", cost).
@@ -94,13 +188,19 @@ func (c Cache) SetApiKey(key ApiKey, ttl time.Duration) {
 }
 
 // ValidApiKey returns true if the ApiKey is valid (aka. also present in cache).
-func (c Cache) ValidApiKey(key ApiKey) bool { +func (c *CacheT) ValidApiKey(key ApiKey) bool { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "api:" + key.Id v, ok := c.cache.Get(scopedKey) if ok { - if v == key.Key { + switch v { + case "": + log.Trace().Str("id", key.Id).Msg("ApiKey cache HIT on disabled KEY") + case key.Key: log.Trace().Str("id", key.Id).Msg("ApiKey cache HIT") - } else { + default: log.Trace().Str("id", key.Id).Msg("ApiKey cache MISMATCH") ok = false } @@ -111,7 +211,10 @@ func (c Cache) ValidApiKey(key ApiKey) bool { } // GetEnrollmentApiKey returns the enrollment API key by ID. -func (c Cache) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { +func (c *CacheT) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "record:" + id if v, ok := c.cache.Get(scopedKey); ok { log.Trace().Str("id", id).Msg("Enrollment cache HIT") @@ -129,8 +232,12 @@ func (c Cache) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { } // SetEnrollmentApiKey adds the enrollment API key into the cache. -func (c Cache) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64, ttl time.Duration) { +func (c *CacheT) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "record:" + id + ttl := c.cfg.EnrollKeyTTL ok := c.cache.SetWithTTL(scopedKey, key, cost, ttl) log.Trace(). Bool("ok", ok). @@ -139,3 +246,45 @@ func (c Cache) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost i Dur("ttl", ttl). 
Msg("EnrollmentApiKey cache SET") } + +func makeArtifactKey(ident, sha2 string) string { + return fmt.Sprintf("artifact:%s:%s", ident, sha2) +} + +func (c *CacheT) GetArtifact(ident, sha2 string) (model.Artifact, bool) { + c.mut.RLock() + defer c.mut.RUnlock() + + scopedKey := makeArtifactKey(ident, sha2) + if v, ok := c.cache.Get(scopedKey); ok { + log.Trace().Str("key", scopedKey).Msg("Artifact cache HIT") + key, ok := v.(model.Artifact) + + if !ok { + log.Error().Str("sha2", sha2).Msg("Artifact cache cast fail") + return model.Artifact{}, false + } + return key, ok + } + + log.Trace().Str("key", scopedKey).Msg("Artifact cache MISS") + return model.Artifact{}, false +} + +// TODO: strip body and spool to on disk cache if larger than a size threshold +func (c *CacheT) SetArtifact(artifact model.Artifact) { + c.mut.RLock() + defer c.mut.RUnlock() + + scopedKey := makeArtifactKey(artifact.Identifier, artifact.DecodedSha256) + cost := int64(len(artifact.Body)) + ttl := c.cfg.ArtifactTTL + + ok := c.cache.SetWithTTL(scopedKey, artifact, cost, ttl) + log.Trace(). + Bool("ok", ok). + Str("key", scopedKey). + Int64("cost", cost). + Dur("ttl", ttl). + Msg("Artifact cache SET") +} diff --git a/internal/pkg/cache/impl.go b/internal/pkg/cache/impl.go new file mode 100644 index 000000000..ef7a70d8a --- /dev/null +++ b/internal/pkg/cache/impl.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package cache + +import ( + "time" +) + +type Cacher interface { + Get(key interface{}) (interface{}, bool) + Set(key, value interface{}, cost int64) bool + SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool + Close() +} diff --git a/internal/pkg/cache/impl_integration.go b/internal/pkg/cache/impl_integration.go new file mode 100644 index 000000000..013b4a6f5 --- /dev/null +++ b/internal/pkg/cache/impl_integration.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build integration +// +build integration + +package cache + +import ( + "time" +) + +func newCache(_ Config) (Cacher, error) { + return &NoCache{}, nil +} + +type NoCache struct{} + +func (c *NoCache) Get(_ interface{}) (interface{}, bool) { + return nil, false +} + +func (c *NoCache) Set(_ interface{}, _ interface{}, _ int64) bool { + return true +} + +func (c *NoCache) SetWithTTL(_, _ interface{}, _ int64, _ time.Duration) bool { + return true +} + +func (c *NoCache) Close() { +} diff --git a/internal/pkg/cache/impl_ristretto.go b/internal/pkg/cache/impl_ristretto.go new file mode 100644 index 000000000..582ba23e7 --- /dev/null +++ b/internal/pkg/cache/impl_ristretto.go @@ -0,0 +1,22 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build !integration +// +build !integration + +package cache + +import ( + "github.com/dgraph-io/ristretto" +) + +func newCache(cfg Config) (Cacher, error) { + rcfg := &ristretto.Config{ + NumCounters: cfg.NumCounters, + MaxCost: cfg.MaxCost, + BufferItems: 64, + } + + return ristretto.NewCache(rcfg) +} diff --git a/internal/pkg/checkin/bulk.go b/internal/pkg/checkin/bulk.go new file mode 100644 index 000000000..0d070e409 --- /dev/null +++ b/internal/pkg/checkin/bulk.go @@ -0,0 +1,248 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package checkin + +import ( + "context" + "encoding/json" + "sync" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + + "github.com/rs/zerolog/log" +) + +const defaultFlushInterval = 10 * time.Second + +type optionsT struct { + flushInterval time.Duration +} + +type Opt func(*optionsT) + +func WithFlushInterval(d time.Duration) Opt { + return func(opt *optionsT) { + opt.flushInterval = d + } +} + +type extraT struct { + meta []byte + seqNo sqn.SeqNo + ver string +} + +// Minimize the size of this structure. +// There will be 10's of thousands of items +// in the map at any point. +type pendingT struct { + ts string + status string + extra *extraT +} + +type Bulk struct { + opts optionsT + bulker bulk.Bulk + mut sync.Mutex + pending map[string]pendingT + + ts string + unix int64 +} + +func NewBulk(bulker bulk.Bulk, opts ...Opt) *Bulk { + parsedOpts := parseOpts(opts...) 
+ + return &Bulk{ + opts: parsedOpts, + bulker: bulker, + pending: make(map[string]pendingT), + } +} + +func parseOpts(opts ...Opt) optionsT { + + outOpts := optionsT{ + flushInterval: defaultFlushInterval, + } + + for _, f := range opts { + f(&outOpts) + } + + return outOpts +} + +// Generate and cache timestamp on seconds change. +// Avoid thousands of formats of an identical string. +func (bc *Bulk) timestamp() string { + + // WARNING: Expects mutex locked. + now := time.Now() + if now.Unix() != bc.unix { + bc.unix = now.Unix() + bc.ts = now.UTC().Format(time.RFC3339) + } + + return bc.ts +} + +// WARNING: Bulk will take ownership of fields, +// so do not use after passing in. +func (bc *Bulk) CheckIn(id string, status string, meta []byte, seqno sqn.SeqNo, newVer string) error { + + // Separate out the extra data to minimize + // the memory footprint of the 90% case of just + // updating the timestamp. + var extra *extraT + if meta != nil || seqno.IsSet() || newVer != "" { + extra = &extraT{ + meta: meta, + seqNo: seqno, + ver: newVer, + } + } + + bc.mut.Lock() + + bc.pending[id] = pendingT{ + ts: bc.timestamp(), + status: status, + extra: extra, + } + + bc.mut.Unlock() + return nil +} + +func (bc *Bulk) Run(ctx context.Context) error { + + tick := time.NewTicker(bc.opts.flushInterval) + defer tick.Stop() + + var err error +LOOP: + for { + select { + case <-tick.C: + if err = bc.flush(ctx); err != nil { + log.Error().Err(err).Msg("Eat bulk checkin error; Keep on truckin'") + err = nil + } + + case <-ctx.Done(): + err = ctx.Err() + break LOOP + } + } + + return err +} + +func (bc *Bulk) flush(ctx context.Context) error { + start := time.Now() + + bc.mut.Lock() + pending := bc.pending + bc.pending = make(map[string]pendingT, len(pending)) + bc.mut.Unlock() + + if len(pending) == 0 { + return nil + } + + updates := make([]bulk.MultiOp, 0, len(pending)) + + simpleCache := make(map[pendingT][]byte) + + nowTimestamp := start.UTC().Format(time.RFC3339) + + var err 
error
+	var needRefresh bool
+	for id, pendingData := range pending {
+
+		// In the simple case, there are no fields and no seqNo.
+		// When that is true, we can reuse an already generated
+		// JSON body containing just the timestamp updates.
+		var body []byte
+		if pendingData.extra == nil {
+
+			var ok bool
+			body, ok = simpleCache[pendingData]
+			if !ok {
+				fields := bulk.UpdateFields{
+					dl.FieldLastCheckin:       pendingData.ts,
+					dl.FieldUpdatedAt:         nowTimestamp,
+					dl.FieldLastCheckinStatus: pendingData.status,
+				}
+				if body, err = fields.Marshal(); err != nil {
+					return err
+				}
+				simpleCache[pendingData] = body
+			}
+		} else {
+
+			fields := bulk.UpdateFields{
+				dl.FieldLastCheckin:       pendingData.ts,     // Set the checkin timestamp
+				dl.FieldUpdatedAt:         nowTimestamp,       // Set "updated_at" to the current timestamp
+				dl.FieldLastCheckinStatus: pendingData.status, // Set the pending status
+			}
+
+			// If the agent version is not empty it needs to be updated
+			// Assuming the agent can be upgraded keeping the same id, but incrementing the version
+			if pendingData.extra.ver != "" {
+				fields[dl.FieldAgent] = map[string]interface{}{
+					dl.FieldAgentVersion: pendingData.extra.ver,
+				}
+			}
+
+			// Update local metadata if provided
+			if pendingData.extra.meta != nil {
+				// Surprise: The json encoder compacts this raw JSON during
+				// the encode process, so there may be unexpected memory overhead:
+				// https://github.com/golang/go/blob/go1.16.3/src/encoding/json/encode.go#L499
+				fields[dl.FieldLocalMetadata] = json.RawMessage(pendingData.extra.meta)
+			}
+
+			// If seqNo changed, set the field appropriately
+			if pendingData.extra.seqNo.IsSet() {
+				fields[dl.FieldActionSeqNo] = pendingData.extra.seqNo
+
+				// Only refresh if seqNo changed; dropping metadata not important.
+ needRefresh = true + } + + if body, err = fields.Marshal(); err != nil { + return err + } + } + + updates = append(updates, bulk.MultiOp{ + Id: id, + Body: body, + Index: dl.FleetAgents, + }) + } + + var opts []bulk.Opt + if needRefresh { + opts = append(opts, bulk.WithRefresh()) + } + + _, err = bc.bulker.MUpdate(ctx, updates, opts...) + + log.Trace(). + Err(err). + Dur("rtt", time.Since(start)). + Int("cnt", len(updates)). + Bool("refresh", needRefresh). + Msg("Flush updates") + + return err +} diff --git a/internal/pkg/checkin/bulk_test.go b/internal/pkg/checkin/bulk_test.go new file mode 100644 index 000000000..8a11a1ff3 --- /dev/null +++ b/internal/pkg/checkin/bulk_test.go @@ -0,0 +1,245 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package checkin + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/google/go-cmp/cmp" + + tst "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/rs/xid" + "github.com/rs/zerolog" +) + +type CustomBulk struct { + tst.MockBulk + + ops []bulk.MultiOp +} + +func (m *CustomBulk) MUpdate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + m.ops = append(m.ops, ops...) 
+ return nil, nil +} + +// Test simple, +// Test with fields +// Test with seq no + +func TestBulkSimple(t *testing.T) { + start := time.Now() + + var mockBulk CustomBulk + + bc := NewBulk(&mockBulk) + + const ver = "8.0.0" + cases := []struct { + desc string + id string + status string + meta []byte + seqno sqn.SeqNo + ver string + }{ + { + "Simple case", + "simpleId", + "online", + nil, + nil, + "", + }, + { + "Singled field case", + "singleFieldId", + "online", + []byte(`{"hey":"now"}`), + nil, + "", + }, + { + "Multi field case", + "multiFieldId", + "online", + []byte(`{"hey":"now","brown":"cow"}`), + nil, + ver, + }, + { + "Multi field nested case", + "multiFieldNestedId", + "online", + []byte(`{"hey":"now","wee":{"little":"doggie"}}`), + nil, + "", + }, + { + "Simple case with seqNo", + "simpleseqno", + "online", + nil, + sqn.SeqNo{1, 2, 3, 4}, + ver, + }, + { + "Field case with seqNo", + "simpleseqno", + "online", + []byte(`{"uncle":"fester"}`), + sqn.SeqNo{5, 6, 7, 8}, + ver, + }, + { + "Unusual status", + "singleFieldId", + "unusual", + nil, + nil, + "", + }, + { + "Empty status", + "singleFieldId", + "", + nil, + nil, + "", + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + + if err := bc.CheckIn(c.id, c.status, c.meta, c.seqno, c.ver); err != nil { + t.Fatal(err) + } + + if err := bc.flush(context.Background()); err != nil { + t.Fatal(err) + } + + if len(mockBulk.ops) != 1 { + t.Fatal("Expected one op") + } + + op := mockBulk.ops[0] + + mockBulk.ops = nil + + // deserialize the response + if op.Id != c.id { + t.Error("Wrong id") + } + + if op.Index != dl.FleetAgents { + t.Error("Wrong index") + } + + type updateT struct { + LastCheckin string `json:"last_checkin"` + Status string `json:"last_checkin_status"` + UpdatedAt string `json:"updated_at"` + Meta json.RawMessage `json:"local_metadata"` + SeqNo sqn.SeqNo `json:"action_seq_no"` + } + + m := make(map[string]updateT) + if err := json.Unmarshal(op.Body, &m); err != nil { + 
t.Error(err)
+			}
+
+			sub, ok := m["doc"]
+			if !ok {
+				t.Fatal("expected doc")
+			}
+
+			validateTimestamp(t, start.Truncate(time.Second), sub.LastCheckin)
+			validateTimestamp(t, start.Truncate(time.Second), sub.UpdatedAt)
+
+			if c.seqno != nil {
+				if cdiff := cmp.Diff(c.seqno, sub.SeqNo); cdiff != "" {
+					t.Error(cdiff)
+				}
+			}
+
+			if c.meta != nil && bytes.Compare(c.meta, sub.Meta) != 0 {
+				t.Error("meta doesn't match up")
+			}
+
+			if c.status != sub.Status {
+				t.Error("status mismatch")
+			}
+
+		})
+	}
+}
+
+func validateTimestamp(t *testing.T, start time.Time, ts string) {
+
+	if t1, err := time.Parse(time.RFC3339, ts); err != nil {
+		t.Error("expected rfc3339")
+	} else if start.After(t1) {
+		t.Error("timestamp in the past")
+	}
+}
+
+func benchmarkBulk(n int, flush bool, b *testing.B) {
+	b.ReportAllocs()
+
+	l := zerolog.GlobalLevel()
+	defer zerolog.SetGlobalLevel(l)
+
+	zerolog.SetGlobalLevel(zerolog.ErrorLevel)
+
+	var mockBulk tst.MockBulk
+
+	bc := NewBulk(mockBulk)
+
+	ids := make([]string, 0, n)
+	for i := 0; i < n; i++ {
+		id := xid.New().String()
+		ids = append(ids, id)
+	}
+
+	for i := 0; i < b.N; i++ {
+
+		for _, id := range ids {
+			err := bc.CheckIn(id, "", nil, nil, "")
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+
+		if flush {
+			err := bc.flush(context.Background())
+			if err != nil {
+				b.Fatal(err)
+			}
+		}
+	}
+}
+
+func BenchmarkBulk_1(b *testing.B)      { benchmarkBulk(1, false, b) }
+func BenchmarkBulk_64(b *testing.B)     { benchmarkBulk(64, false, b) }
+func BenchmarkBulk_8192(b *testing.B)   { benchmarkBulk(8192, false, b) }
+func BenchmarkBulk_37268(b *testing.B)  { benchmarkBulk(37268, false, b) }
+func BenchmarkBulk_131072(b *testing.B) { benchmarkBulk(131072, false, b) }
+func BenchmarkBulk_262144(b *testing.B) { benchmarkBulk(262144, false, b) }
+
+func BenchmarkBulkFlush_1(b *testing.B)    { benchmarkBulk(1, true, b) }
+func BenchmarkBulkFlush_64(b *testing.B)   { benchmarkBulk(64, true, b) }
+func BenchmarkBulkFlush_8192(b *testing.B) { benchmarkBulk(8192, 
true, b) } +func BenchmarkBulkFlush_37268(b *testing.B) { benchmarkBulk(37268, true, b) } +func BenchmarkBulkFlush_131072(b *testing.B) { benchmarkBulk(131072, true, b) } +func BenchmarkBulkFlush_262144(b *testing.B) { benchmarkBulk(262144, true, b) } diff --git a/internal/pkg/config/cache.go b/internal/pkg/config/cache.go new file mode 100644 index 000000000..c1d5a67cf --- /dev/null +++ b/internal/pkg/config/cache.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import ( + "time" +) + +const ( + defaultActionTTL = time.Minute * 5 + defaultEnrollKeyTTL = time.Minute + defaultArtifactTTL = time.Hour * 24 + defaultApiKeyTTL = time.Minute * 15 // ApiKey validation is a bottleneck. + defaultApiKeyJitter = time.Minute * 5 // Jitter allows some randomness on ApiKeyTTL, zero to disable +) + +type Cache struct { + NumCounters int64 `config:"num_counters"` + MaxCost int64 `config:"max_cost"` + ActionTTL time.Duration `config:"ttl_action"` + EnrollKeyTTL time.Duration `config:"ttl_enroll_key"` + ArtifactTTL time.Duration `config:"ttl_artifact"` + ApiKeyTTL time.Duration `config:"ttl_api_key"` + ApiKeyJitter time.Duration `config:"jitter_api_key"` +} + +func (c *Cache) InitDefaults() { + l := loadLimits().Cache + + c.NumCounters = l.NumCounters + c.MaxCost = l.MaxCost + c.ActionTTL = defaultActionTTL + c.EnrollKeyTTL = defaultEnrollKeyTTL + c.ArtifactTTL = defaultArtifactTTL + c.ApiKeyTTL = defaultApiKeyTTL + c.ApiKeyJitter = defaultApiKeyJitter +} diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index a868a1da6..2f636792b 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -5,9 +5,10 @@ package config import ( - "fmt" + "errors" "github.com/elastic/go-ucfg" + 
"github.com/elastic/go-ucfg/flag" "github.com/elastic/go-ucfg/yaml" ) @@ -21,38 +22,114 @@ var DefaultOptions = []ucfg.Option{ // Config is the global configuration. type Config struct { - Fleet Fleet `config:"fleet"` - Output Output `config:"output"` - Inputs []Input `config:"inputs"` + Fleet Fleet `config:"fleet"` + Output Output `config:"output"` + Inputs []Input `config:"inputs"` + Logging Logging `config:"logging"` + HTTP HTTP `config:"http"` } // InitDefaults initializes the defaults for the configuration. func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() + c.HTTP.InitDefaults() } // Validate ensures that the configuration is valid. func (c *Config) Validate() error { - if c.Inputs == nil || len(c.Inputs) == 0 { - return fmt.Errorf("a fleet-server input can be defined") + if len(c.Inputs) == 0 { + return errors.New("a fleet-server input must be defined") } if len(c.Inputs) > 1 { - return fmt.Errorf("only 1 fleet-server input can be defined") + return errors.New("only 1 fleet-server input can be defined") } return nil } -// LoadFile take a path and load the file and return a new configuration. -func LoadFile(path string) (*Config, error) { +// Merge merges two configurations together. +func (c *Config) Merge(other *Config) (*Config, error) { + repr, err := ucfg.NewFrom(c, DefaultOptions...) + if err != nil { + return nil, err + } + err = repr.Merge(other, DefaultOptions...) + if err != nil { + return nil, err + } cfg := &Config{} - c, err := yaml.NewConfigWithFile(path, DefaultOptions...) + err = repr.Unpack(cfg, DefaultOptions...) if err != nil { return nil, err } - err = c.Unpack(cfg, DefaultOptions...) + return cfg, nil +} + +// FromConfig returns Config from the ucfg.Config. +func FromConfig(c *ucfg.Config) (*Config, error) { + cfg := &Config{} + err := c.Unpack(cfg, DefaultOptions...) if err != nil { return nil, err } return cfg, nil } + +// LoadFile take a path and load the file and return a new configuration. 
+func LoadFile(path string) (*Config, error) { + c, err := yaml.NewConfigWithFile(path, DefaultOptions...) + if err != nil { + return nil, err + } + return FromConfig(c) +} + +// Flag captures key/values pairs into an ucfg.Config object. +type Flag flag.FlagValue + +// NewFlag creates an instance that allows the `-E` flag to overwrite +// the configuration from the command-line. +func NewFlag() *Flag { + opts := append( + []ucfg.Option{ + ucfg.MetaData(ucfg.Meta{Source: "command line flag"}), + }, + DefaultOptions..., + ) + + tmp := flag.NewFlagKeyValue(ucfg.New(), true, opts...) + return (*Flag)(tmp) +} + +func (f *Flag) access() *flag.FlagValue { + return (*flag.FlagValue)(f) +} + +// Config returns the config object the Flag stores applied settings to. +func (f *Flag) Config() *ucfg.Config { + return f.access().Config() +} + +// Set sets a settings value in the Config object. The input string must be a +// key-value pair like `key=value`. If the value is missing, the value is set +// to the boolean value `true`. +func (f *Flag) Set(s string) error { + return f.access().Set(s) +} + +// Get returns the Config object used to store values. +func (f *Flag) Get() interface{} { + return f.Config() +} + +// String always returns an empty string. It is required to fulfil +// the flag.Value interface. +func (f *Flag) String() string { + return "" +} + +// Type reports the type of contents (setting=value) expected to be parsed by Set. +// It is used to build the CLI usage string. +func (f *Flag) Type() string { + return "setting=value" +} diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index ad0539a10..4c7b5c1c9 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build !integration // +build !integration package config @@ -24,44 +25,23 @@ func TestConfig(t *testing.T) { }{ "basic": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: "localhost", - Port: 8000, - Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, - }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Type: "fleet-server", + Server: defaultServer(), + Cache: defaultCache(), + Monitor: Monitor{ + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "fleet-logging": { @@ -75,119 +55,84 @@ func TestConfig(t *testing.T) { }, }, Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: "localhost", - Port: 8000, - Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, - }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Type: "fleet-server", + Server: defaultServer(), + Cache: 
defaultCache(), + Monitor: Monitor{ + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "input": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: "localhost", - Port: 8000, - Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, - }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Type: "fleet-server", + Server: defaultServer(), + Cache: defaultCache(), + Monitor: Monitor{ + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "input-config": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { Type: "fleet-server", Server: Server{ - Host: "localhost", - Port: 8888, + Host: "localhost", + Port: 8888, + InternalPort: 8221, Timeouts: ServerTimeouts{ - Read: 20 * time.Second, - Write: 5 * time.Second, + Read: 20 * time.Second, 
+ ReadHeader: 5 * time.Second, + Idle: 30 * time.Second, + Write: 5 * time.Second, + CheckinTimestamp: 30 * time.Second, + CheckinLongPoll: 5 * time.Minute, + CheckinJitter: 30 * time.Second, + }, + Profiler: ServerProfiler{ + Enabled: false, + Bind: "localhost:6060", }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + CompressionLevel: 1, + CompressionThresh: 1024, + Limits: defaultServerLimits(), + Bulk: defaultServerBulk(), + }, + Cache: defaultCache(), + Monitor: Monitor{ + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "bad-input": { @@ -197,17 +142,11 @@ func TestConfig(t *testing.T) { err: "only 1 fleet-server input can be defined", }, "bad-logging": { - err: "invalid log level; must be one of: debug, info, warning, error", + err: "invalid log level; must be one of: trace, debug, info, warning, error", }, "bad-output": { err: "can only contain elasticsearch key", }, - "bad-no-output": { - err: "cannot connect to elasticsearch without username/password", - }, - "bad-no-agent-id": { - err: "string value is not set", - }, } for name, test := range testcases { @@ -233,3 +172,68 @@ func TestConfig(t *testing.T) { }) } } + +// Stub out the defaults so that the above is easier to maintain + +func defaultCache() Cache { + var d Cache + d.InitDefaults() + return d +} + +func defaultServerTimeouts() ServerTimeouts { + var d ServerTimeouts + d.InitDefaults() + return d +} + +func defaultServerLimits() ServerLimits { + var d ServerLimits + d.InitDefaults() + return d +} + +func defaultServerBulk() ServerBulk { + var d ServerBulk + d.InitDefaults() + return d +} + +func defaultLogging() Logging { + var d Logging + d.InitDefaults() + return d +} + +func defaultHTTP() HTTP { + var d HTTP + d.InitDefaults() + return d +} + +func defaultFleet() Fleet { + return Fleet{ + 
Agent: Agent{ + ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", + Logging: AgentLogging{}, + }, + } +} + +func defaultElastic() Elasticsearch { + return Elasticsearch{ + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + Timeout: 90 * time.Second, + } +} + +func defaultServer() Server { + var d Server + d.InitDefaults() + return d +} diff --git a/internal/pkg/config/defaults/1024_limits.yml b/internal/pkg/config/defaults/1024_limits.yml new file mode 100644 index 000000000..cb6f82390 --- /dev/null +++ b/internal/pkg/config/defaults/1024_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 1024 + max: 2048 +cache_limits: + num_counters: 20000 + max_cost: 20971520 +server_limits: + policy_throttle: 50ms + max_connections: 7000 + checkin_limit: + interval: 5ms + burst: 500 + max: 5001 + artifact_limit: + interval: 5ms + burst: 500 + max: 1000 + enroll_limit: + interval: 20ms + burst: 50 + max: 100 + ack_limit: + interval: 4ms + burst: 500 + max: 1000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/2048_limits.yml b/internal/pkg/config/defaults/2048_limits.yml new file mode 100644 index 000000000..5e5462cfc --- /dev/null +++ b/internal/pkg/config/defaults/2048_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 2048 + max: 4096 +cache_limits: + num_counters: 40000 + max_cost: 50971520 +server_limits: + policy_throttle: 10ms + max_connections: 10000 + checkin_limit: + interval: 2ms + burst: 1000 + max: 7501 + artifact_limit: + interval: 2ms + burst: 1000 + max: 2000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 2ms + burst: 1000 + max: 2000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/4096_limits.yml b/internal/pkg/config/defaults/4096_limits.yml new file mode 100644 index 000000000..622460411 --- /dev/null +++ b/internal/pkg/config/defaults/4096_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 4096 + max: 8192 +cache_limits: + 
num_counters: 80000 + max_cost: 104857600 +server_limits: + policy_throttle: 5ms + max_connections: 20000 + checkin_limit: + interval: 1ms + burst: 2000 + max: 10001 + artifact_limit: + interval: 1ms + burst: 2000 + max: 4000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 1ms + burst: 2000 + max: 4000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/8192_limits.yml b/internal/pkg/config/defaults/8192_limits.yml new file mode 100644 index 000000000..9dfcf3784 --- /dev/null +++ b/internal/pkg/config/defaults/8192_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 8192 + max: 16384 +cache_limits: + num_counters: 160000 + max_cost: 209715200 +server_limits: + policy_throttle: 5ms + max_connections: 32000 + checkin_limit: + interval: 500us + burst: 4000 + max: 12501 + artifact_limit: + interval: 500us + burst: 4000 + max: 8000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 500us + burst: 4000 + max: 8000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/base_limits.yml b/internal/pkg/config/defaults/base_limits.yml new file mode 100644 index 000000000..c47f669f0 --- /dev/null +++ b/internal/pkg/config/defaults/base_limits.yml @@ -0,0 +1,24 @@ +ram: + max: 1024 +cache_limits: + num_counters: 2000 + max_cost: 2097152 +server_limits: + policy_throttle: 200ms + max_connections: 100 + checkin_limit: + interval: 50ms + burst: 25 + max: 100 + artifact_limit: + interval: 100ms + burst: 10 + max: 10 + enroll_limit: + interval: 100ms + burst: 5 + max: 10 + ack_limit: + interval: 10ms + burst: 20 + max: 20 \ No newline at end of file diff --git a/internal/pkg/config/defaults/max_limits.yml b/internal/pkg/config/defaults/max_limits.yml new file mode 100644 index 000000000..394ee9a73 --- /dev/null +++ b/internal/pkg/config/defaults/max_limits.yml @@ -0,0 +1,24 @@ +ram: + min: 16384 +cache_limits: + num_counters: 160000 + max_cost: 209715200 +server_limits: + policy_throttle: 2ms + 
max_connections: 32000 + checkin_limit: + interval: 500us + burst: 4000 + max: 15001 + artifact_limit: + interval: 500us + burst: 4000 + max: 8000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 500us + burst: 4000 + max: 8000 \ No newline at end of file diff --git a/internal/pkg/config/env_defaults.go b/internal/pkg/config/env_defaults.go new file mode 100644 index 000000000..87d6b5b2a --- /dev/null +++ b/internal/pkg/config/env_defaults.go @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. + +package config + +import ( + "math" + "runtime" + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" + "github.com/elastic/go-ucfg/yaml" + "github.com/pbnjay/memory" + "github.com/pkg/errors" +) + +const ( + defaultCacheNumCounters = 500000 // 10x times expected count + defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size + + defaultMaxConnections = 0 // no limit + defaultPolicyThrottle = time.Millisecond * 5 + + defaultCheckinInterval = time.Millisecond + defaultCheckinBurst = 1000 + defaultCheckinMax = 0 + defaultCheckinMaxBody = 1024 * 1024 + + defaultArtifactInterval = time.Millisecond * 5 + defaultArtifactBurst = 25 + defaultArtifactMax = 50 + defaultArtifactMaxBody = 0 + + defaultEnrollInterval = time.Millisecond * 10 + defaultEnrollBurst = 100 + defaultEnrollMax = 50 + defaultEnrollMaxBody = 1024 * 512 + + defaultAckInterval = time.Millisecond * 10 + defaultAckBurst = 100 + defaultAckMax = 50 + defaultAckMaxBody = 1024 * 1024 * 2 +) + +type valueRange struct { + Min int `config:"min"` + Max int `config:"max"` +} + +type envLimits struct { + RAM valueRange `config:"ram"` + Server *serverLimitDefaults 
`config:"server_limits"` + Cache *cacheLimits `config:"cache_limits"` +} + +func defaultEnvLimits() *envLimits { + return &envLimits{ + RAM: valueRange{ + Min: 0, + Max: int(getMaxInt()), + }, + Server: defaultserverLimitDefaults(), + Cache: defaultCacheLimits(), + } +} + +type cacheLimits struct { + NumCounters int64 `config:"num_counters"` + MaxCost int64 `config:"max_cost"` +} + +func defaultCacheLimits() *cacheLimits { + return &cacheLimits{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + } +} + +type limit struct { + Interval time.Duration `config:"interval"` + Burst int `config:"burst"` + Max int64 `config:"max"` + MaxBody int64 `config:"max_body_byte_size"` +} + +type serverLimitDefaults struct { + PolicyThrottle time.Duration `config:"policy_throttle"` + MaxConnections int `config:"max_connections"` + + CheckinLimit limit `config:"checkin_limit"` + ArtifactLimit limit `config:"artifact_limit"` + EnrollLimit limit `config:"enroll_limit"` + AckLimit limit `config:"ack_limit"` +} + +func defaultserverLimitDefaults() *serverLimitDefaults { + return &serverLimitDefaults{ + PolicyThrottle: defaultCacheNumCounters, + MaxConnections: defaultCacheMaxCost, + + CheckinLimit: limit{ + Interval: defaultCheckinInterval, + Burst: defaultCheckinBurst, + Max: defaultCheckinMax, + MaxBody: defaultCheckinMaxBody, + }, + ArtifactLimit: limit{ + Interval: defaultArtifactInterval, + Burst: defaultArtifactBurst, + Max: defaultArtifactMax, + MaxBody: defaultArtifactMaxBody, + }, + EnrollLimit: limit{ + Interval: defaultEnrollInterval, + Burst: defaultEnrollBurst, + Max: defaultEnrollMax, + MaxBody: defaultEnrollMaxBody, + }, + AckLimit: limit{ + Interval: defaultAckInterval, + Burst: defaultAckBurst, + Max: defaultAckMax, + MaxBody: defaultAckMaxBody, + }, + } +} + +var defaults []*envLimits + +func init() { + // Packed Files + // internal/pkg/config/defaults/1024_limits.yml + // internal/pkg/config/defaults/2048_limits.yml + // 
internal/pkg/config/defaults/4096_limits.yml + // internal/pkg/config/defaults/8192_limits.yml + // internal/pkg/config/defaults/base_limits.yml + // internal/pkg/config/defaults/max_limits.yml + unpacked := packer.MustUnpack("eJzsll9vqkgYxu/3Y/R6swUUc9zkXIyOIDQzRoP8u9kwYBEO/1KxMGz2u28GwbZCFZtNerOXTvCZd97nN887fz8ESb57SZzoMfvlP7pp8hz4j97u2TlG+eGR54TxX1EQB/nhDxpHD38+uLGUr/zUV2Q+I2HqIw1QDOrf+x1MfRQCbgWy0DJwauvTg2PwmbdEk3kAfJLouSngV88QOdtUq1UAKIKgQO3/pWlIRpg7rf8aIW1LEcgq21QF21SfiRzljrmp6v2Xs1ciR+FOn3LOUn31lpuDraU+1ma5W7zTE8SjbWDOMaZHt0p9XLX74dQ28ItjiI3uuq5Rmc8ykmwiN7H3BKY+NviqWQ88U608tqaBolnLLdOfKHBbILh4UuazvZtsMjuWQk9qzg5T/+u6iwJBwHQjkqivRC7PtSrsDHPgO6deCpZRTBSoFMRET2xdkVXeTepe8qj5lhjSeBWAstHcW0J+/ezy4Bp/Pvx+nSSBG/+4QVJNzhtJGAIR+4NJ4j4hif8qSegGSUgDrTuhI+uhI0RHUygzYkRcrdtxRz3pzYFvJTp1R+t3DoO3c1fbAq3ZvhL15Ci2DMyddYP22+joyTr1YunAbkuP68zlC6eUO2la9NFUXtaLwtM+loFf/ovzMz0Eft4kasxNJ9eJOhH0RtQKLkQ0nKjxJ0SVCK7HWHMFVrkrMBd02p7crVLW4YIIZWaNomdP3lMy2nBE3k4UqF8nKryXKKmno0q3ozWp9+UT6smnkzMfsoRrKbMNkRKhPNwgtUQ9mYI6maI0fZdChz/X2avXQyjtkr8elFE/+KlwnagTQe+mnWZVK7h+sgSJOfYxV2SRJzqjRq8rPmWGVbTVsQ4y4lrXVppbYo2d/M6J1zjV6pFYjKzRJiOCyCiuzq4NnXgaKLxuV7luVxcUa6Cs776pco5h763R5kambAu01LtTBXbIGn8zWTyCM757u9ad27WCw/KKOIfdp3S195TiO99OV0i6M5uUou3gJySV92XTtm96ULzuf4sMziUN9E+lyzdO7bJ+dOPpgXxNk7985wybcr1Tk17UR4cQEzvlrcf398cPir8tfngE/4+fc/z889u/AQAA///e0qUb") + + for f, v := range unpacked { + cfg, err := yaml.NewConfig(v, DefaultOptions...) 
+ if err != nil { + panic(errors.Wrap(err, "Cannot read spec from "+f)) + } + + l := defaultEnvLimits() + if err := cfg.Unpack(&l, DefaultOptions...); err != nil { + panic(errors.Wrap(err, "Cannot unpack spec from "+f)) + } + + defaults = append(defaults, l) + } +} + +func loadLimits() *envLimits { + ramSize := int(memory.TotalMemory() / 1024 / 1024) + return loadLimitsForRam(ramSize) +} + +func loadLimitsForRam(currentRAM int) *envLimits { + for _, l := range defaults { + // get max possible config for current env + if l.RAM.Min < currentRAM && currentRAM <= l.RAM.Max { + return l + } + } + + return defaultEnvLimits() +} + +func getMaxInt() int64 { + if strings.HasSuffix(runtime.GOARCH, "64") { + return math.MaxInt64 + } + return math.MaxInt32 +} diff --git a/internal/pkg/config/env_defaults_test.go b/internal/pkg/config/env_defaults_test.go new file mode 100644 index 000000000..cd16db9c4 --- /dev/null +++ b/internal/pkg/config/env_defaults_test.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. 
+ +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadLimits(t *testing.T) { + testCases := []struct { + Name string + CurrentRAM int + ExpectedMaxRAM int + }{ + {"low ram", 128, 1024}, + {"512", 512, 1024}, + {"precise", 1024, 1024}, + {"2-to-4", 2650, 4096}, + {"close to max", 16383, 16384}, + {"above max", 16385, int(getMaxInt())}, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + l := loadLimitsForRam(tc.CurrentRAM) + + require.Equal(t, tc.ExpectedMaxRAM, l.RAM.Max) + }) + } +} diff --git a/internal/pkg/config/fleet.go b/internal/pkg/config/fleet.go index a3fd7e502..5cb12d881 100644 --- a/internal/pkg/config/fleet.go +++ b/internal/pkg/config/fleet.go @@ -6,8 +6,9 @@ package config import ( "fmt" - "github.com/rs/zerolog" "strings" + + "github.com/rs/zerolog" ) // AgentLogging is the log level set on the Agent. @@ -15,13 +16,13 @@ type AgentLogging struct { Level string `config:"level"` } -// InitDefaults initializes the defaults for the configuration. -func (c *AgentLogging) InitDefaults() { - c.Level = "info" -} - // Validate ensures that the configuration is valid. func (c *AgentLogging) Validate() error { + if c.Level == "" { + // allowed to be empty because `agent.logging.level` is only + // an override of the logging level from `logging.level` + return nil + } if _, err := strToLevel(c.Level); err != nil { return err } @@ -36,7 +37,7 @@ func (c *AgentLogging) LogLevel() zerolog.Level { // Agent is the ID and logging configuration of the Agent running this Fleet Server. 
type Agent struct { - ID string `config:"id" validate:"required"` + ID string `config:"id"` Version string `config:"version"` Logging AgentLogging `config:"logging"` } @@ -58,6 +59,8 @@ func strToLevel(s string) (zerolog.Level, error) { s = strings.ToLower(s) switch strings.TrimSpace(s) { + case "trace": + l = zerolog.TraceLevel case "debug": l = zerolog.DebugLevel case "info": @@ -67,7 +70,7 @@ func strToLevel(s string) (zerolog.Level, error) { case "error": l = zerolog.ErrorLevel default: - return l, fmt.Errorf("invalid log level; must be one of: debug, info, warning, error") + return l, fmt.Errorf("invalid log level; must be one of: trace, debug, info, warning, error") } return l, nil diff --git a/internal/pkg/config/http.go b/internal/pkg/config/http.go new file mode 100644 index 000000000..c85aca0fc --- /dev/null +++ b/internal/pkg/config/http.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +const kDefaultHTTPHost = "localhost" +const kDefaultHTTPPort = 5066 + +// Http is the configuration for the API endpoint. 
+type HTTP struct { + Enabled bool `config:"enabled"` + Host string `config:"host"` + Port int `config:"port"` + User string `config:"named_pipe.user"` + SecurityDescriptor string `config:"named_pipe.security_descriptor"` +} + +func (h *HTTP) InitDefaults() { + h.Enabled = false + h.Host = kDefaultHTTPHost + h.Port = kDefaultHTTPPort +} diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 609e97df8..7124e430b 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -5,30 +5,33 @@ package config import ( + "compress/flate" "fmt" "strings" "time" + + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) -// ServerTimeouts is the configuration for the server timeouts -type ServerTimeouts struct { - Read time.Duration `config:"read"` - Write time.Duration `config:"write"` -} +const kDefaultHost = "0.0.0.0" +const kDefaultPort = 8220 +const kDefaultInternalHost = "localhost" +const kDefaultInternalPort = 8221 -// InitDefaults initializes the defaults for the configuration. -func (c *ServerTimeouts) InitDefaults() { - c.Read = 5 * time.Second - c.Write = 60 * 10 * time.Second // 10 minutes (long poll) +// Policy is the configuration policy to use. +type Policy struct { + ID string `config:"id"` } -// ServerProfile is the configuration for profiling the server. -type ServerProfile struct { - Bind string `config:"bind"` +// ServerProfiler is the configuration for profiling the server. +type ServerProfiler struct { + Enabled bool `config:"enabled"` + Bind string `config:"bind"` } // InitDefaults initializes the defaults for the configuration. 
-func (c *ServerProfile) InitDefaults() { +func (c *ServerProfiler) InitDefaults() { + c.Enabled = false c.Bind = "localhost:6060" } @@ -38,50 +41,98 @@ type ServerTLS struct { Cert string `config:"cert"` } +type ServerBulk struct { + FlushInterval time.Duration `config:"flush_interval"` + FlushThresholdCount int `config:"flush_threshold_cnt"` + FlushThresholdSize int `config:"flush_threshold_size"` + FlushMaxPending int `config:"flush_max_pending"` +} + +func (c *ServerBulk) InitDefaults() { + c.FlushInterval = 250 * time.Millisecond + c.FlushThresholdCount = 2048 + c.FlushThresholdSize = 1024 * 1024 + c.FlushMaxPending = 8 +} + // Server is the configuration for the server type Server struct { - Host string `config:"host"` - Port uint16 `config:"port"` - TLS ServerTLS `config:"tls"` - Timeouts ServerTimeouts `config:"timeouts"` - MaxHeaderByteSize int `config:"max_header_byte_size"` - RateLimitBurst int `config:"rate_limit_burst"` - RateLimitInterval time.Duration `config:"rate_limit_interval"` - MaxEnrollPending int64 `config:"max_enroll_pending"` - Profile ServerProfile `config:"profile"` + Host string `config:"host"` + Port uint16 `config:"port"` + InternalPort uint16 `config:"internal_port"` + TLS *tlscommon.ServerConfig `config:"ssl"` + Timeouts ServerTimeouts `config:"timeouts"` + Profiler ServerProfiler `config:"profiler"` + CompressionLevel int `config:"compression_level"` + CompressionThresh int `config:"compression_threshold"` + Limits ServerLimits `config:"limits"` + Runtime Runtime `config:"runtime"` + Bulk ServerBulk `config:"bulk"` } // InitDefaults initializes the defaults for the configuration. 
func (c *Server) InitDefaults() { - c.Host = "localhost" - c.Port = 8000 + c.Host = kDefaultHost + c.Port = kDefaultPort + c.InternalPort = kDefaultInternalPort c.Timeouts.InitDefaults() - c.MaxHeaderByteSize = 8192 // 8k - c.RateLimitBurst = 1024 - c.RateLimitInterval = 5 * time.Millisecond - c.MaxEnrollPending = 64 - c.Profile.InitDefaults() + c.CompressionLevel = flate.BestSpeed + c.CompressionThresh = 1024 + c.Profiler.InitDefaults() + c.Limits.InitDefaults() + c.Runtime.InitDefaults() + c.Bulk.InitDefaults() +} + +// BindEndpoints returns the binding address for the all HTTP server listeners. +func (c *Server) BindEndpoints() []string { + primaryAddress := c.BindAddress() + endpoints := make([]string, 0, 2) + endpoints = append(endpoints, primaryAddress) + + if internalAddress := c.BindInternalAddress(); internalAddress != "" && internalAddress != ":0" && internalAddress != primaryAddress { + endpoints = append(endpoints, internalAddress) + } + + return endpoints } // BindAddress returns the binding address for the HTTP server. func (c *Server) BindAddress() string { - host := c.Host + return bindAddress(c.Host, c.Port) +} + +// BindInternalAddress returns the binding address for the internal HTTP server. +func (c *Server) BindInternalAddress() string { + if c.InternalPort <= 0 { + return bindAddress(kDefaultInternalHost, kDefaultInternalPort) + } + + return bindAddress(kDefaultInternalHost, c.InternalPort) +} + +func bindAddress(host string, port uint16) string { if strings.Count(host, ":") > 1 && strings.Count(host, "]") == 0 { host = "[" + host + "]" } - return fmt.Sprintf("%s:%d", host, c.Port) + return fmt.Sprintf("%s:%d", host, port) } // Input is the input defined by Agent to run Fleet Server. 
type Input struct { - Type string `config:"type"` - Server Server `config:"server"` + Type string `config:"type"` + Policy Policy `config:"policy"` + Server Server `config:"server"` + Cache Cache `config:"cache"` + Monitor Monitor `config:"monitor"` } // InitDefaults initializes the defaults for the configuration. func (c *Input) InitDefaults() { c.Type = "fleet-server" c.Server.InitDefaults() + c.Cache.InitDefaults() + c.Monitor.InitDefaults() } // Validate ensures that the configuration is valid. diff --git a/internal/pkg/config/input_test.go b/internal/pkg/config/input_test.go index 10a8ec520..0379aac19 100644 --- a/internal/pkg/config/input_test.go +++ b/internal/pkg/config/input_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package config diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go new file mode 100644 index 000000000..f731cdb7d --- /dev/null +++ b/internal/pkg/config/limits.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package config + +import ( + "time" +) + +type Limit struct { + Interval time.Duration `config:"interval"` + Burst int `config:"burst"` + Max int64 `config:"max"` + MaxBody int64 `config:"max_body_byte_size"` +} + +type ServerLimits struct { + PolicyThrottle time.Duration `config:"policy_throttle"` + MaxHeaderByteSize int `config:"max_header_byte_size"` + MaxConnections int `config:"max_connections"` + + CheckinLimit Limit `config:"checkin_limit"` + ArtifactLimit Limit `config:"artifact_limit"` + EnrollLimit Limit `config:"enroll_limit"` + AckLimit Limit `config:"ack_limit"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *ServerLimits) InitDefaults() { + l := loadLimits().Server + + c.MaxHeaderByteSize = 8192 // 8k + c.MaxConnections = l.MaxConnections + c.PolicyThrottle = l.PolicyThrottle + + c.CheckinLimit = Limit{ + Interval: l.CheckinLimit.Interval, + Burst: l.CheckinLimit.Burst, + Max: l.CheckinLimit.Max, + MaxBody: l.CheckinLimit.MaxBody, + } + c.ArtifactLimit = Limit{ + Interval: l.ArtifactLimit.Interval, + Burst: l.ArtifactLimit.Burst, + Max: l.ArtifactLimit.Max, + MaxBody: l.ArtifactLimit.MaxBody, + } + c.EnrollLimit = Limit{ + Interval: l.EnrollLimit.Interval, + Burst: l.EnrollLimit.Burst, + Max: l.EnrollLimit.Max, + MaxBody: l.EnrollLimit.MaxBody, + } + c.AckLimit = Limit{ + Interval: l.AckLimit.Interval, + Burst: l.AckLimit.Burst, + Max: l.AckLimit.Max, + MaxBody: l.AckLimit.MaxBody, + } +} diff --git a/internal/pkg/config/logging.go b/internal/pkg/config/logging.go new file mode 100644 index 000000000..1199d5860 --- /dev/null +++ b/internal/pkg/config/logging.go @@ -0,0 +1,69 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package config + +import ( + "github.com/rs/zerolog" + "os" + "time" +) + +// LoggingFiles configuration for the logging file output. +type LoggingFiles struct { + Path string `config:"path"` + Name string `config:"name"` + MaxSize uint `config:"rotateeverybytes" validate:"min=1"` + MaxBackups uint `config:"keepfiles" validate:"max=1024"` + Permissions uint32 `config:"permissions"` + Interval time.Duration `config:"interval"` + RotateOnStartup bool `config:"rotateonstartup"` + RedirectStderr bool `config:"redirect_stderr"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *LoggingFiles) InitDefaults() { + cwd, err := os.Getwd() + if err != nil { + // something really wrong here + panic(err) + } + + c.Path = cwd + c.Name = "fleet-server.log" + c.MaxSize = 10 * 1024 * 1024 + c.MaxBackups = 7 + c.Permissions = 0600 + c.Interval = 0 + c.RotateOnStartup = true +} + +// Logging configuration. +type Logging struct { + Level string `config:"level"` + ToStderr bool `config:"to_stderr"` + ToFiles bool `config:"to_files"` + Pretty bool `config:"pretty"` + Files *LoggingFiles `config:"files"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *Logging) InitDefaults() { + c.Level = "info" + c.ToFiles = true +} + +// Validate ensures that the configuration is valid. +func (c *Logging) Validate() error { + if _, err := strToLevel(c.Level); err != nil { + return err + } + return nil +} + +// LogLevel returns configured zerolog.Level +func (c *Logging) LogLevel() zerolog.Level { + l, _ := strToLevel(c.Level) + return l +} diff --git a/internal/pkg/config/monitor.go b/internal/pkg/config/monitor.go new file mode 100644 index 000000000..d93837878 --- /dev/null +++ b/internal/pkg/config/monitor.go @@ -0,0 +1,22 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import "time" + +const ( + defaultFetchSize = 1000 + defaultPollTimeout = 4 * time.Minute +) + +type Monitor struct { + FetchSize int `config:"fetch_size"` + PollTimeout time.Duration `config:"poll_timeout"` +} + +func (m *Monitor) InitDefaults() { + m.FetchSize = defaultFetchSize + m.PollTimeout = defaultPollTimeout +} diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index c17c19336..7de261a9a 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -14,30 +14,35 @@ import ( "strings" "time" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) +// The timeout would be driven by the server for long poll. +// Giving it some sane long value. +const httpTransportLongPollTimeout = 10 * time.Minute + var hasScheme = regexp.MustCompile(`^([a-z][a-z0-9+\-.]*)://`) // Elasticsearch is the configuration for elasticsearch. 
type Elasticsearch struct { - Protocol string `config:"protocol"` - Hosts []string `config:"hosts"` - Path string `config:"path"` - Headers map[string]string `config:"headers"` - Username string `config:"username"` - Password string `config:"password"` - APIKey string `config:"api_key"` - ProxyURL string `config:"proxy_url"` - ProxyDisable bool `config:"proxy_disable"` - TLS *tlscommon.Config `config:"ssl"` - MaxRetries int `config:"max_retries"` - MaxConnPerHost int `config:"max_conn_per_host"` - BulkFlushInterval time.Duration `config:"bulk_flush_interval"` - Timeout time.Duration `config:"timeout"` + Protocol string `config:"protocol"` + Hosts []string `config:"hosts"` + Path string `config:"path"` + Headers map[string]string `config:"headers"` + Username string `config:"username"` + Password string `config:"password"` + APIKey string `config:"api_key"` + ServiceToken string `config:"service_token"` + ProxyURL string `config:"proxy_url"` + ProxyDisable bool `config:"proxy_disable"` + ProxyHeaders map[string]string `config:"proxy_headers"` + TLS *tlscommon.Config `config:"ssl"` + MaxRetries int `config:"max_retries"` + MaxConnPerHost int `config:"max_conn_per_host"` + Timeout time.Duration `config:"timeout"` } // InitDefaults initializes the defaults for the configuration. @@ -47,7 +52,6 @@ func (c *Elasticsearch) InitDefaults() { c.Timeout = 90 * time.Second c.MaxRetries = 3 c.MaxConnPerHost = 128 - c.BulkFlushInterval = 250 * time.Millisecond } // Validate ensures that the configuration is valid. 
@@ -55,9 +59,6 @@ func (c *Elasticsearch) Validate() error { if c.APIKey != "" { return fmt.Errorf("cannot connect to elasticsearch with api_key; must use username/password") } - if c.Username == "" || c.Password == "" { - return fmt.Errorf("cannot connect to elasticsearch without username/password") - } if c.ProxyURL != "" && !c.ProxyDisable { if _, err := common.ParseURL(c.ProxyURL); err != nil { return err @@ -73,7 +74,7 @@ func (c *Elasticsearch) Validate() error { } // ToESConfig converts the configuration object into the config for the elasticsearch client. -func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { +func (c *Elasticsearch) ToESConfig(longPoll bool) (elasticsearch.Config, error) { // build the addresses addrs := make([]string, len(c.Hosts)) for i, host := range c.Hosts { @@ -100,6 +101,17 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { ResponseHeaderTimeout: c.Timeout, ExpectContinueTimeout: 1 * time.Second, } + + disableRetry := false + + if longPoll { + httpTransport.IdleConnTimeout = httpTransportLongPollTimeout + httpTransport.ResponseHeaderTimeout = httpTransportLongPollTimeout + + // no retries for long poll monitoring + disableRetry = true + } + if c.TLS != nil && c.TLS.IsEnabled() { tls, err := tlscommon.LoadTLSConfig(c.TLS) if err != nil { @@ -107,12 +119,26 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { } httpTransport.TLSClientConfig = tls.ToConfig() } - if c.ProxyURL != "" && !c.ProxyDisable { - proxyUrl, err := common.ParseURL(c.ProxyURL) - if err != nil { - return elasticsearch.Config{}, err + + if !c.ProxyDisable { + if c.ProxyURL != "" { + proxyUrl, err := common.ParseURL(c.ProxyURL) + if err != nil { + return elasticsearch.Config{}, err + } + httpTransport.Proxy = http.ProxyURL(proxyUrl) + } else { + httpTransport.Proxy = http.ProxyFromEnvironment + } + + var proxyHeaders http.Header + if len(c.ProxyHeaders) > 0 { + proxyHeaders = make(http.Header, 
len(c.ProxyHeaders)) + for k, v := range c.ProxyHeaders { + proxyHeaders.Add(k, v) + } } - httpTransport.Proxy = http.ProxyURL(proxyUrl) + httpTransport.ProxyConnectHeader = proxyHeaders } h := http.Header{} @@ -125,12 +151,14 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { h.Set("X-elastic-product-origin", "fleet") return elasticsearch.Config{ - Addresses: addrs, - Username: c.Username, - Password: c.Password, - Header: h, - Transport: httpTransport, - MaxRetries: c.MaxRetries, + Addresses: addrs, + Username: c.Username, + Password: c.Password, + ServiceToken: c.ServiceToken, + Header: h, + Transport: httpTransport, + MaxRetries: c.MaxRetries, + DisableRetry: disableRetry, }, nil } diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index dc29e2457..6d38a67e8 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build !integration // +build !integration package config @@ -9,11 +10,12 @@ package config import ( "crypto/tls" "net/http" + "os" "testing" "time" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" @@ -27,14 +29,13 @@ func TestToESConfig(t *testing.T) { }{ "http": { cfg: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + Timeout: 90 * time.Second, }, result: elasticsearch.Config{ Addresses: []string{"http://localhost:9200"}, @@ -62,10 +63,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, }, result: elasticsearch.Config{ Addresses: []string{"http://localhost:9200", "http://other-host:9200"}, @@ -93,10 +93,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, TLS: &tlscommon.Config{ VerificationMode: tlscommon.VerifyNone, }, @@ -132,10 +131,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * 
time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, TLS: &tlscommon.Config{ VerificationMode: tlscommon.VerifyNone, }, @@ -171,8 +169,12 @@ func TestToESConfig(t *testing.T) { cmpopts.IgnoreUnexported(tls.Config{}), } t.Run(name, func(t *testing.T) { - res, err := test.cfg.ToESConfig() + res, err := test.cfg.ToESConfig(false) require.NoError(t, err) + + // cmp.Diff can't handle function pointers. + res.Transport.(*http.Transport).Proxy = nil + test.result.Header.Set("X-elastic-product-origin", "fleet") if !assert.True(t, cmp.Equal(test.result, res, copts...)) { diff := cmp.Diff(test.result, res, copts...) @@ -183,3 +185,102 @@ func TestToESConfig(t *testing.T) { }) } } + +func TestESProxyConfig(t *testing.T) { + testcases := map[string]struct { + cfg Elasticsearch + url string + want string + headers map[string]string + env map[string]string + }{ + "no proxy": { + cfg: Elasticsearch{ProxyDisable: true}, + }, + "proxy url set": { + cfg: Elasticsearch{ + ProxyURL: "http://proxy.com", + }, + url: "http://test.com", + want: "http://proxy.com", + }, + "with headers": { + cfg: Elasticsearch{ + ProxyURL: "http://proxy.com", + ProxyHeaders: map[string]string{ + "TestProxyHeader": "Custom Value", + }, + }, + url: "http://test.com", + want: "http://proxy.com", + headers: map[string]string{ + "TestProxyHeader": "Custom Value", + }, + }, + "proxy from env by default": { + cfg: Elasticsearch{}, + url: "http://test.com", + want: "http://proxy.com", + env: map[string]string{ + "HTTP_PROXY": "http://proxy.com", + }, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + setTestEnv(t, test.env) + + res, err := test.cfg.ToESConfig(false) + require.NoError(t, err) + + transport := res.Transport.(*http.Transport) + if test.want == "" { + require.Nil(t, transport.Proxy) + return + } + require.NotNil(t, transport.Proxy) + + req, err := http.NewRequest("GET", test.url, nil) + require.NoError(t, err) + + got, err := 
transport.Proxy(req) + require.NoError(t, err) + + if len(test.headers) == 0 { + require.Len(t, transport.ProxyConnectHeader, 0) + } else { + headers := http.Header{} + for k, v := range test.headers { + headers.Add(k, v) + } + require.Equal(t, headers, transport.ProxyConnectHeader) + } + + require.Equal(t, test.want, got.String()) + }) + } +} + +func setTestEnv(t *testing.T, env map[string]string) { + var oldEnv map[string]string + for k := range env { + if v := os.Getenv(k); v != "" { + oldEnv[k] = v + } + } + + t.Cleanup(func() { + for k := range env { + if v := oldEnv[k]; v != v { + os.Setenv(k, v) + } else { + os.Unsetenv(k) + } + } + }) + + for k, v := range env { + os.Setenv(k, v) + } +} diff --git a/internal/pkg/env/env.go b/internal/pkg/config/runtime.go similarity index 61% rename from internal/pkg/env/env.go rename to internal/pkg/config/runtime.go index 4ff1833e0..c1f2d1c37 100644 --- a/internal/pkg/env/env.go +++ b/internal/pkg/config/runtime.go @@ -2,16 +2,12 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package env +package config -import ( - "os" -) +type Runtime struct { + GCPercent int `config:"gc_percent"` +} -func GetStr(key, defaultVal string) string { - val, ok := os.LookupEnv(key) - if !ok { - val = defaultVal - } - return val +func (r Runtime) InitDefaults() { + r.GCPercent = 0 } diff --git a/internal/pkg/config/testdata/bad-logging.yml b/internal/pkg/config/testdata/bad-logging.yml index 503d7ffcc..844901693 100644 --- a/internal/pkg/config/testdata/bad-logging.yml +++ b/internal/pkg/config/testdata/bad-logging.yml @@ -7,4 +7,4 @@ fleet: agent: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 logging: - level: trace + level: grace diff --git a/internal/pkg/config/timeouts.go b/internal/pkg/config/timeouts.go new file mode 100644 index 000000000..1c58382ba --- /dev/null +++ b/internal/pkg/config/timeouts.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import ( + "time" +) + +// ServerTimeouts is the configuration for the server timeouts +type ServerTimeouts struct { + Read time.Duration `config:"read"` + Write time.Duration `config:"write"` + Idle time.Duration `config:"idle"` + ReadHeader time.Duration `config:"read_header"` + CheckinTimestamp time.Duration `config:"checkin_timestamp"` + CheckinLongPoll time.Duration `config:"checkin_long_poll"` + CheckinJitter time.Duration `config:"checkin_jitter"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *ServerTimeouts) InitDefaults() { + // see https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/ + + // The read timeout starts on ACCEPT of the connection, and includes + // the time to read the entire body (if the body is read, otherwise to the end of the headers). + // Note that for TLS, this include the TLS handshake as well. 
+ // In most cases, we are authenticating the apikey and doing an agent record lookup + // *before* reading the body. This is purposeful to avoid streaming data from an unauthenticated + // connection. However, the downside is that if the roundtrip to Elastic is slow, we may + // end up hitting the Read timeout before actually reading any data off the socket. + // Use a large timeout to accomodate the authentication lag. Add a ReadHeader timeout + // below to handle preAuth. + c.Read = 60 * time.Second + + // Read header timeout covers ACCEPT to the end of the HTTP headers. + // Note that for TLS, this include the TLS handshake as well. + // This is considered preauth in this server, so limit the timeout to something reasonable. + c.ReadHeader = 5 * time.Second + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. Because TLS handshakes are expensive + // for the server, avoid aggressive connection close with generous idle timeout. + c.Idle = 30 * time.Second + + // The write timeout for HTTPS covers the time from ACCEPT to the end of the response write; + // so in that case it covers the TLS handshake. If the connection is reused, the write timeout + // covers the time from the end of the request header to the end of the response write. + // Set to a very large timeout to allow for slow backend; must be at least as large as Read timeout plus Long Poll. + c.Write = 10 * time.Minute + + // Write out a timestamp to elastic on this timeout during long poll + c.CheckinTimestamp = 30 * time.Second + + // Long poll timeout, will be short-circuited on policy change + c.CheckinLongPoll = 5 * time.Minute + + // Jitter subtracted from c.CheckinLongPoll. Disabled if zero. 
+ c.CheckinJitter = 30 * time.Second +} diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 6544610e0..03019ac71 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -6,6 +6,8 @@ package coordinator import ( "context" + "errors" + "fmt" "net" "os" "runtime" @@ -19,6 +21,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" @@ -29,6 +32,9 @@ const ( defaultLeaderInterval = 30 * time.Second // become leader for at least 30 seconds defaultMetadataInterval = 5 * time.Minute // update metadata every 5 minutes defaultCoordinatorRestartDelay = 5 * time.Second // delay in restarting coordinator on failure + defaultUnenrollCheckInterval = 1 * time.Minute // perform unenroll timeout interval check + + unenrolledReasonTimeout = "timeout" // reason agent was unenrolled ) // Monitor monitors the leader election of policies and routes managed policies to the coordinator. 
@@ -38,9 +44,11 @@ type Monitor interface { } type policyT struct { - id string - cord Coordinator - canceller context.CancelFunc + id string + cord Coordinator + cordCanceller context.CancelFunc + unenrollTimeout time.Duration + unenrollCanceller context.CancelFunc } type monitorT struct { @@ -55,14 +63,16 @@ type monitorT struct { agentMetadata model.AgentMetadata hostMetadata model.HostMetadata - checkInterval time.Duration - leaderInterval time.Duration - metadataInterval time.Duration - coordRestartDelay time.Duration + checkInterval time.Duration + leaderInterval time.Duration + metadataInterval time.Duration + coordRestartDelay time.Duration + unenrollCheckInterval time.Duration serversIndex string policiesIndex string leadersIndex string + agentsIndex string policies map[string]policyT } @@ -70,32 +80,38 @@ type monitorT struct { // NewMonitor creates a new coordinator policy monitor. func NewMonitor(fleet config.Fleet, version string, bulker bulk.Bulk, monitor monitor.Monitor, factory Factory) Monitor { return &monitorT{ - log: log.With().Str("ctx", "policy leader manager").Logger(), - version: version, - fleet: fleet, - bulker: bulker, - monitor: monitor, - factory: factory, - checkInterval: defaultCheckInterval, - leaderInterval: defaultLeaderInterval, - metadataInterval: defaultMetadataInterval, - coordRestartDelay: defaultCoordinatorRestartDelay, - serversIndex: dl.FleetServers, - policiesIndex: dl.FleetPolicies, - leadersIndex: dl.FleetPoliciesLeader, - policies: make(map[string]policyT), + log: log.With().Str("ctx", "policy leader manager").Logger(), + version: version, + fleet: fleet, + bulker: bulker, + monitor: monitor, + factory: factory, + checkInterval: defaultCheckInterval, + leaderInterval: defaultLeaderInterval, + metadataInterval: defaultMetadataInterval, + coordRestartDelay: defaultCoordinatorRestartDelay, + unenrollCheckInterval: defaultUnenrollCheckInterval, + serversIndex: dl.FleetServers, + policiesIndex: dl.FleetPolicies, + 
leadersIndex: dl.FleetPoliciesLeader, + agentsIndex: dl.FleetAgents, + policies: make(map[string]policyT), } } // Run runs the monitor. func (m *monitorT) Run(ctx context.Context) (err error) { - m.log.Info().Msg("start") - defer func() { - m.log.Info().Err(err).Msg("exited") - }() + // When ID of the Agent is not provided to Fleet Server then the Agent + // has not enrolled. The Fleet Server cannot become a leader until the + // Agent it is running under has been enrolled. + m.calcMetadata() + if m.agentMetadata.Id == "" { + m.log.Warn().Msg("missing config fleet.agent.id; acceptable until Elastic Agent has enrolled") + <-ctx.Done() + return ctx.Err() + } // Ensure leadership on startup - m.calcMetadata() err = m.ensureLeadership(ctx) if err != nil { return err @@ -117,7 +133,7 @@ func (m *monitorT) Run(ctx context.Context) (err error) { case hits := <-s.Output(): err = m.handlePolicies(ctx, hits) if err != nil { - return err + m.log.Warn().Err(err).Msgf("Encountered an error while policy leadership changes; continuing to retry.") } case <-mT.C: m.calcMetadata() @@ -125,7 +141,7 @@ func (m *monitorT) Run(ctx context.Context) (err error) { case <-lT.C: err = m.ensureLeadership(ctx) if err != nil { - return err + m.log.Warn().Err(err).Msgf("Encountered an error while checking/assigning policy leaders; continuing to retry.") } lT.Reset(m.checkInterval) case <-ctx.Done(): @@ -142,6 +158,7 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { var policy model.Policy err := hit.Unmarshal(&policy) if err != nil { + m.log.Debug().Err(err).Msg("Failed to deserialize policy json") return err } if policy.CoordinatorIdx != 0 { @@ -155,9 +172,11 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { // current leader send to its coordinator err = p.cord.Update(ctx, policy) if err != nil { + m.log.Info().Err(err).Msg("Failed to update policy leader") return err } } + m.rescheduleUnenroller(ctx, &p, &policy) } else { new = 
true } @@ -176,15 +195,20 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { func (m *monitorT) ensureLeadership(ctx context.Context) error { m.log.Debug().Msg("ensuring leadership of policies") err := dl.EnsureServer(ctx, m.bulker, m.version, m.agentMetadata, m.hostMetadata, dl.WithIndexName(m.serversIndex)) + if err != nil { - return err + return fmt.Errorf("Failed to check server status on Elasticsearch (%s): %w", m.hostMetadata.Name, err) } // fetch current policies and leaders leaders := map[string]model.PolicyLeader{} policies, err := dl.QueryLatestPolicies(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { - return err + if errors.Is(err, es.ErrIndexNotFound) { + m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) + return nil + } + return fmt.Errorf("Encountered error while querying policies: %w", err) } if len(policies) > 0 { ids := make([]string, len(policies)) @@ -193,7 +217,9 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { } leaders, err = dl.SearchPolicyLeaders(ctx, m.bulker, ids, dl.WithIndexName(m.leadersIndex)) if err != nil { - return err + if !errors.Is(err, es.ErrIndexNotFound) { + return fmt.Errorf("Encountered error while fetching policy leaders: %w", err) + } } } @@ -234,9 +260,9 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { if pt.cord != nil { pt.cord = nil } - if pt.canceller != nil { - pt.canceller() - pt.canceller = nil + if pt.cordCanceller != nil { + pt.cordCanceller() + pt.cordCanceller = nil } return } @@ -255,13 +281,14 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { go runCoordinator(cordCtx, cord, l, m.coordRestartDelay) go runCoordinatorOutput(cordCtx, cord, m.bulker, l, m.policiesIndex) pt.cord = cord - pt.canceller = canceller + pt.cordCanceller = canceller } else { err = pt.cord.Update(ctx, p) if err != nil { l.Err(err).Msg("failed to update coordinator") } } + m.rescheduleUnenroller(ctx, &pt, &p) 
}(p, pt) } for range lead { @@ -283,7 +310,7 @@ func (m *monitorT) releaseLeadership() { for _, pt := range m.policies { go func(pt policyT) { if pt.cord != nil { - pt.canceller() + pt.cordCanceller() } // uses a background context, because the context for the // monitor will be cancelled at this point in the code @@ -352,16 +379,40 @@ func (m *monitorT) getIPs() ([]string, error) { return ips, nil } +func (m *monitorT) rescheduleUnenroller(ctx context.Context, pt *policyT, p *model.Policy) { + l := m.log.With().Str(dl.FieldPolicyId, pt.id).Logger() + unenrollTimeout := time.Duration(p.UnenrollTimeout) * time.Second + if unenrollTimeout != pt.unenrollTimeout { + // unenroll timeout changed + if pt.unenrollCanceller != nil { + pt.unenrollCanceller() + pt.unenrollCanceller = nil + } + + if unenrollTimeout > 0 { + // start worker for unenrolling agents based timeout + unenrollCtx, canceller := context.WithCancel(ctx) + go runUnenroller(unenrollCtx, m.bulker, pt.id, unenrollTimeout, l, m.unenrollCheckInterval, m.agentsIndex) + pt.unenrollCanceller = canceller + } + pt.unenrollTimeout = unenrollTimeout + } +} + func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d time.Duration) { + cnt := 0 for { - l.Info().Str("coordinator", cord.Name()).Msg("starting coordinator for policy") + l.Info().Int("count", cnt).Str("coordinator", cord.Name()).Msg("Starting policy coordinator") err := cord.Run(ctx) if err != context.Canceled { - l.Err(err).Msg("coordinator failed") + l.Err(err).Msg("Policy coordinator failed and stopped") if sleep.WithContext(ctx, d) == context.Canceled { break } + } else { + break } + cnt += 1 } } @@ -372,12 +423,106 @@ func runCoordinatorOutput(ctx context.Context, cord Coordinator, bulker bulk.Bul s := l.With().Int64(dl.FieldRevisionIdx, p.RevisionIdx).Int64(dl.FieldCoordinatorIdx, p.CoordinatorIdx).Logger() _, err := dl.CreatePolicy(ctx, bulker, p, dl.WithIndexName(policiesIndex)) if err != nil { - l.Err(err).Msg("failed to insert 
a new policy revision") + s.Err(err).Msg("Policy coordinator failed to add a new policy revision") } else { - s.Info().Msg("coordinator inserted a new policy revision") + s.Info().Int64("revision_id", p.RevisionIdx).Msg("Policy coordinator added a new policy revision") } case <-ctx.Done(): return } } } + +func runUnenroller(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, l zerolog.Logger, checkInterval time.Duration, agentsIndex string) { + l.Info(). + Dur("checkInterval", checkInterval). + Dur("unenrollTimeout", unenrollTimeout). + Msg("unenroll monitor start") + defer l.Info().Msg("Unenroll monitor exit") + + t := time.NewTimer(checkInterval) + defer t.Stop() + + for { + select { + case <-t.C: + if err := runUnenrollerWork(ctx, bulker, policyId, unenrollTimeout, l, agentsIndex); err != nil { + l.Err(err).Dur("unenroll_timeout", unenrollTimeout).Msg("failed to unenroll offline agents") + } + t.Reset(checkInterval) + case <-ctx.Done(): + return + } + } +} + +func runUnenrollerWork(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, zlog zerolog.Logger, agentsIndex string) error { + agents, err := dl.FindOfflineAgents(ctx, bulker, policyId, unenrollTimeout, dl.WithIndexName(agentsIndex)) + if err != nil || len(agents) == 0 { + return err + } + + zlog = zlog.With().Dur("timeout", unenrollTimeout).Logger() + + agentIds := make([]string, len(agents)) + + for i, agent := range agents { + err = unenrollAgent(ctx, zlog, bulker, &agent, agentsIndex) + if err != nil { + return err + } + agentIds[i] = agent.Id + } + + zlog.Info(). + Strs(logger.ApiKeyId, agentIds). 
+ Msg("marked agents unenrolled due to unenroll timeout") + + return nil +} + +func unenrollAgent(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, agent *model.Agent, agentsIndex string) error { + now := time.Now().UTC().Format(time.RFC3339) + fields := bulk.UpdateFields{ + dl.FieldActive: false, + dl.FieldUnenrolledAt: now, + dl.FieldUnenrolledReason: unenrolledReasonTimeout, + dl.FieldUpdatedAt: now, + } + body, err := fields.Marshal() + if err != nil { + return err + } + apiKeys := getAPIKeyIDs(agent) + + zlog = zlog.With(). + Str(logger.AgentId, agent.Id). + Strs(logger.ApiKeyId, apiKeys). + Logger() + + zlog.Info().Msg("unenrollAgent due to unenroll timeout") + + if len(apiKeys) > 0 { + err = bulker.ApiKeyInvalidate(ctx, apiKeys...) + if err != nil { + zlog.Error().Err(err).Msg("Fail apiKey invalidate") + return err + } + } + if err = bulker.Update(ctx, agentsIndex, agent.Id, body, bulk.WithRefresh()); err != nil { + zlog.Error().Err(err).Msg("Fail unenrollAgent record update") + } + + return err +} + +func getAPIKeyIDs(agent *model.Agent) []string { + keys := make([]string, 0, 1) + if agent.AccessApiKeyId != "" { + keys = append(keys, agent.AccessApiKeyId) + } + if agent.DefaultApiKeyId != "" { + keys = append(keys, agent.DefaultApiKeyId) + } + return keys +} diff --git a/internal/pkg/coordinator/monitor_integration_test.go b/internal/pkg/coordinator/monitor_integration_test.go index 29375e6dd..6e0d0a853 100644 --- a/internal/pkg/coordinator/monitor_integration_test.go +++ b/internal/pkg/coordinator/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration // +build integration package coordinator @@ -9,12 +10,16 @@ package coordinator import ( "context" "encoding/json" - "sync" + "fmt" "testing" "time" "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" @@ -36,7 +41,7 @@ func TestMonitorLeadership(t *testing.T) { serversIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingServer) policiesIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicy) leadersIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicyLeader) - pim, err := monitor.New(policiesIndex, bulker.Client()) + pim, err := monitor.New(policiesIndex, bulker.Client(), bulker.Client()) if err != nil { t.Fatal(err) } @@ -60,22 +65,21 @@ func TestMonitorLeadership(t *testing.T) { } // start the monitors - var wg sync.WaitGroup - wg.Add(2) - go func() { + g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { err := pim.Run(ctx) - wg.Done() if err != nil && err != context.Canceled { - t.Fatal(err) + return err } - }() - go func() { + return nil + }) + g.Go(func() error { err := pm.Run(ctx) - wg.Done() if err != nil && err != context.Canceled { - t.Fatal(err) + return err } - }() + return nil + }) // wait 500ms to ensure everything is running; then create a new policy <-time.After(500 * time.Millisecond) @@ -101,13 +105,129 @@ func TestMonitorLeadership(t *testing.T) { // stop the monitors cn() - wg.Wait() + err = g.Wait() + require.NoError(t, err) // ensure leadership was released ensureLeadershipReleased(bulkCtx, t, bulker, cfg, leadersIndex, policy1Id) ensureLeadershipReleased(bulkCtx, t, bulker, cfg, leadersIndex, policy2Id) } +func TestMonitorUnenroller(t *testing.T) { + 
parentCtx := context.Background() + bulkCtx, bulkCn := context.WithCancel(parentCtx) + defer bulkCn() + ctx, cn := context.WithCancel(parentCtx) + defer cn() + + // flush bulker on every operation + bulker := ftesting.SetupBulk(bulkCtx, t, bulk.WithFlushThresholdCount(1)) + serversIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingServer) + policiesIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicy) + leadersIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicyLeader) + agentsIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingAgent) + pim, err := monitor.New(policiesIndex, bulker.Client(), bulker.Client()) + require.NoError(t, err) + cfg := makeFleetConfig() + pm := NewMonitor(cfg, "1.0.0", bulker, pim, NewCoordinatorZero) + pm.(*monitorT).serversIndex = serversIndex + pm.(*monitorT).leadersIndex = leadersIndex + pm.(*monitorT).policiesIndex = policiesIndex + pm.(*monitorT).agentsIndex = agentsIndex + pm.(*monitorT).unenrollCheckInterval = 10 * time.Millisecond // very fast check interval for test + + // add policy with unenroll timeout + policy1Id := uuid.Must(uuid.NewV4()).String() + policy1 := model.Policy{ + PolicyId: policy1Id, + CoordinatorIdx: 0, + Data: []byte("{}"), + RevisionIdx: 1, + UnenrollTimeout: 300, // 5 minutes (300 seconds) + } + _, err = dl.CreatePolicy(ctx, bulker, policy1, dl.WithIndexName(policiesIndex)) + require.NoError(t, err) + + // create apikeys that should be invalidated + agentId := uuid.Must(uuid.NewV4()).String() + accessKey, err := bulker.ApiKeyCreate( + ctx, + agentId, + "", + []byte(""), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) + require.NoError(t, err) + outputKey, err := bulker.ApiKeyCreate( + ctx, + agentId, + "", + []byte(""), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) + require.NoError(t, err) + + // add agent that should be unenrolled + sixAgo := time.Now().UTC().Add(-6 * time.Minute) + agentBody, err := json.Marshal(model.Agent{ + AccessApiKeyId: 
accessKey.Id, + DefaultApiKeyId: outputKey.Id, + Active: true, + EnrolledAt: sixAgo.Format(time.RFC3339), + LastCheckin: sixAgo.Format(time.RFC3339), + PolicyId: policy1Id, + UpdatedAt: sixAgo.Format(time.RFC3339), + }) + _, err = bulker.Create(ctx, agentsIndex, agentId, agentBody) + require.NoError(t, err) + + // start the monitors + g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { + err := pim.Run(ctx) + if err != nil && err != context.Canceled { + return err + } + return nil + }) + g.Go(func() error { + err := pm.Run(ctx) + if err != nil && err != context.Canceled { + return err + } + return nil + }) + + // should set the agent to not active (aka. unenrolled) + ftesting.Retry(t, ctx, func(ctx context.Context) error { + agent, err := dl.FindAgent(bulkCtx, bulker, dl.QueryAgentByID, dl.FieldId, agentId, dl.WithIndexName(agentsIndex)) + if err != nil { + return err + } + if agent.Active { + return fmt.Errorf("agent %s is still active", agentId) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(50)) + + // stop the monitors + cn() + err = g.Wait() + require.NoError(t, err) + + // check other fields now we know its marked unactive + agent, err := dl.FindAgent(bulkCtx, bulker, dl.QueryAgentByID, dl.FieldId, agentId, dl.WithIndexName(agentsIndex)) + require.NoError(t, err) + assert.NotEmpty(t, agent.UnenrolledAt) + assert.Equal(t, unenrolledReasonTimeout, agent.UnenrolledReason) + + // should error as they are now invalidated + _, err = bulker.ApiKeyAuth(bulkCtx, *accessKey) + assert.Error(t, err) + _, err = bulker.ApiKeyAuth(bulkCtx, *outputKey) + assert.Error(t, err) +} + func makeFleetConfig() config.Fleet { id := uuid.Must(uuid.NewV4()).String() return config.Fleet{ diff --git a/internal/pkg/coordinator/v0.go b/internal/pkg/coordinator/v0.go index 57ecabeea..1730b34c8 100644 --- a/internal/pkg/coordinator/v0.go +++ b/internal/pkg/coordinator/v0.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" 
"github.com/rs/zerolog/log" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" ) @@ -26,9 +27,9 @@ type coordinatorZeroT struct { // NewCoordinatorZero creates a V0 coordinator. func NewCoordinatorZero(policy model.Policy) (Coordinator, error) { return &coordinatorZeroT{ - log: log.With().Str("ctx", "coordinator v0").Str("policyId", policy.PolicyId).Logger(), + log: log.With().Str("ctx", "coordinator v0").Str(logger.PolicyId, policy.PolicyId).Logger(), policy: policy, - in: make(chan model.Policy, 1), + in: make(chan model.Policy), out: make(chan model.Policy), }, nil } @@ -40,26 +41,19 @@ func (c *coordinatorZeroT) Name() string { // Run runs the coordinator for the policy. func (c *coordinatorZeroT) Run(ctx context.Context) error { - c.in <- c.policy + err := c.updatePolicy(c.policy) + if err != nil { + c.log.Err(err).Msg("failed to handle policy") + } + for { select { case p := <-c.in: - newData, err := c.handlePolicy(p.Data) + err = c.updatePolicy(p) if err != nil { c.log.Err(err).Msg("failed to handle policy") continue } - if p.CoordinatorIdx == 0 { - p.CoordinatorIdx = 1 - p.Data = newData - c.policy = p - c.out <- p - } else if string(newData) != string(p.Data) { - p.CoordinatorIdx += 1 - p.Data = newData - c.policy = p - c.out <- p - } case <-ctx.Done(): return ctx.Err() } @@ -77,7 +71,22 @@ func (c *coordinatorZeroT) Output() <-chan model.Policy { return c.out } -// handlePolicy handles the new policy. +// updatePolicy performs the working of incrementing the coordinator idx. +func (c *coordinatorZeroT) updatePolicy(p model.Policy) error { + newData, err := c.handlePolicy(p.Data) + if err != nil { + return err + } + if p.CoordinatorIdx == 0 || string(newData) != string(p.Data) { + p.CoordinatorIdx += 1 + p.Data = newData + c.policy = p + c.out <- p + } + return nil +} + +// handlePolicy performs the actual work of coordination. // // Does nothing at the moment. 
func (c *coordinatorZeroT) handlePolicy(data json.RawMessage) (json.RawMessage, error) { diff --git a/internal/pkg/coordinator/v0_test.go b/internal/pkg/coordinator/v0_test.go index a60a53c1c..41e340e63 100644 --- a/internal/pkg/coordinator/v0_test.go +++ b/internal/pkg/coordinator/v0_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package coordinator @@ -32,7 +33,8 @@ func TestCoordinatorZero(t *testing.T) { go func() { if err := coord.Run(ctx); err != nil && err != context.Canceled { - t.Fatal(err) + t.Error(err) + return } }() diff --git a/internal/pkg/danger/buf.go b/internal/pkg/danger/buf.go new file mode 100644 index 000000000..36daaa6f1 --- /dev/null +++ b/internal/pkg/danger/buf.go @@ -0,0 +1,83 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package danger + +// Effectively golang's string builder with a Reset option + +import ( + "unicode/utf8" +) + +type Buf struct { + buf []byte +} + +func (b *Buf) Bytes() []byte { + return b.buf +} + +func (b *Buf) Set(s []byte) { + b.buf = s +} + +func (b *Buf) Len() int { return len(b.buf) } + +func (b *Buf) Cap() int { return cap(b.buf) } + +func (b *Buf) Reset() { + b.buf = b.buf[:0] +} + +func (b *Buf) grow(n int) { + buf := make([]byte, len(b.buf), 2*cap(b.buf)+n) + copy(buf, b.buf) + b.buf = buf +} + +func (b *Buf) Grow(n int) { + if n < 0 { + panic("danger.Buf.Grow: negative count") + } + if cap(b.buf)-len(b.buf) < n { + b.grow(n) + } +} + +// Write appends the contents of p to b's buffer. +// Write always returns len(p), nil. +func (b *Buf) Write(p []byte) (int, error) { + b.buf = append(b.buf, p...) 
+ return len(p), nil +} + +// WriteByte appends the byte c to b's buffer. +// The returned error is always nil. +func (b *Buf) WriteByte(c byte) error { + b.buf = append(b.buf, c) + return nil +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer. +// It returns the length of r and a nil error. +func (b *Buf) WriteRune(r rune) (int, error) { + if r < utf8.RuneSelf { + b.buf = append(b.buf, byte(r)) + return 1, nil + } + l := len(b.buf) + if cap(b.buf)-l < utf8.UTFMax { + b.grow(utf8.UTFMax) + } + n := utf8.EncodeRune(b.buf[l:l+utf8.UTFMax], r) + b.buf = b.buf[:l+n] + return n, nil +} + +// WriteString appends the contents of s to b's buffer. +// It returns the length of s and a nil error. +func (b *Buf) WriteString(s string) (int, error) { + b.buf = append(b.buf, s...) + return len(s), nil +} diff --git a/internal/pkg/danger/buf_test.go b/internal/pkg/danger/buf_test.go new file mode 100644 index 000000000..d0618aea1 --- /dev/null +++ b/internal/pkg/danger/buf_test.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package danger + +import ( + "crypto/rand" + "testing" +) + +// Validate that if a buffer needs to grow during a write, +// Previous cached pointers into underlying data are still valid. 
+func TestBufGrowWhileWrite(t *testing.T) { + + nBytes := 1024 * 1024 + src := make([]byte, 1024*1024) + _, err := rand.Read(src) + if err != nil { + t.Fatal(err) + } + + ptrs := make([][]byte, 0, nBytes) + + var dst Buf + for i := 0; i < nBytes; i++ { + + if err = dst.WriteByte(src[i]); err != nil { + t.Fatal(err) + } + + ptr := dst.Bytes()[i:] + + ptrs = append(ptrs, ptr) + } + + for i, p := range ptrs { + + if p[0] != src[i] { + t.Fatal("Mismatch: ", i) + } + } +} diff --git a/internal/pkg/dl/action_results_integration_test.go b/internal/pkg/dl/action_results_integration_test.go index 417c38e84..938736cbc 100644 --- a/internal/pkg/dl/action_results_integration_test.go +++ b/internal/pkg/dl/action_results_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl @@ -104,7 +105,7 @@ func TestActionResultsStored(t *testing.T) { index, bulker, acrs := setupActionResults(ctx, t) - res, err := bulker.Search(ctx, []string{index}, []byte("{}")) + res, err := bulker.Search(ctx, index, []byte("{}")) if err != nil { t.Fatal(err) } diff --git a/internal/pkg/dl/actions.go b/internal/pkg/dl/actions.go index 15b617241..4db876281 100644 --- a/internal/pkg/dl/actions.go +++ b/internal/pkg/dl/actions.go @@ -6,15 +6,22 @@ package dl import ( "context" + "errors" + "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/rs/zerolog/log" ) const ( FieldAgents = "agents" FieldExpiration = "expiration" + + maxAgentActionsFetchSize = 100 ) var ( @@ -44,6 +51,8 @@ func prepareFindAgentActions() *dsl.Tmpl { filter.Terms(FieldAgents, 
tmpl.Bind(FieldAgents), nil) + // Select more actions per agent since the agents array is not loaded + root.Size(maxAgentActionsFetchSize) root.Source().Excludes(FieldAgents) tmpl.MustResolve(root) @@ -76,15 +85,59 @@ func FindActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, params m return findActions(ctx, bulker, tmpl, FleetActions, params) } -func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) ([]model.Action, error) { +func FindAgentActions(ctx context.Context, bulker bulk.Bulk, minSeqNo, maxSeqNo sqn.SeqNo, agentId string) ([]model.Action, error) { + const index = FleetActions + params := map[string]interface{}{ + FieldSeqNo: minSeqNo.Value(), + FieldMaxSeqNo: maxSeqNo.Value(), + FieldExpiration: time.Now().UTC().Format(time.RFC3339), + FieldAgents: []string{agentId}, + } + + res, err := findActionsHits(ctx, bulker, QueryAgentActions, index, params) + if err != nil || res == nil { + return nil, err + } + + if es.HasHoles(minSeqNo, res.Hits) { + err = es.Refresh(ctx, bulker.Client(), index) + if err != nil { + log.Error().Err(err).Msg("failed to refresh index") + } + res, err := findActionsHits(ctx, bulker, QueryAgentActions, index, params) + if err != nil || res == nil { + return nil, err + } + } + + return hitsToActions(res.Hits) +} + +func findActionsHits(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) (*es.HitsT, error) { res, err := Search(ctx, bulker, tmpl, index, params) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + log.Debug().Str("index", index).Msg(es.ErrIndexNotFound.Error()) + err = nil + } return nil, err } + return res, nil +} + +func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) ([]model.Action, error) { + res, err := findActionsHits(ctx, bulker, tmpl, index, params) + if err != nil || res == nil { + return nil, err + } + + return 
hitsToActions(res.Hits) +} - actions := make([]model.Action, 0, len(res.Hits)) +func hitsToActions(hits []es.HitT) ([]model.Action, error) { + actions := make([]model.Action, 0, len(hits)) - for _, hit := range res.Hits { + for _, hit := range hits { var action model.Action err := hit.Unmarshal(&action) if err != nil { @@ -92,5 +145,5 @@ func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index st } actions = append(actions, action) } - return actions, err + return actions, nil } diff --git a/internal/pkg/dl/actions_integration_test.go b/internal/pkg/dl/actions_integration_test.go index 7f9e26e74..6883c7875 100644 --- a/internal/pkg/dl/actions_integration_test.go +++ b/internal/pkg/dl/actions_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/agent.go b/internal/pkg/dl/agent.go index 998dfc340..7ec07cc53 100644 --- a/internal/pkg/dl/agent.go +++ b/internal/pkg/dl/agent.go @@ -6,13 +6,11 @@ package dl import ( "context" - "encoding/json" + "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/model" - - "github.com/gofrs/uuid" ) const ( @@ -20,8 +18,9 @@ const ( ) var ( - QueryAgentByAssessAPIKeyID = prepareAgentFindByAccessAPIKeyID() - QueryAgentByID = prepareAgentFindByID() + QueryAgentByAssessAPIKeyID = prepareAgentFindByAccessAPIKeyID() + QueryAgentByID = prepareAgentFindByID() + QueryOfflineAgentsByPolicyID = prepareOfflineAgentsByPolicyID() ) func prepareAgentFindByID() *dsl.Tmpl { @@ -36,8 +35,22 @@ func prepareAgentFindByField(field string) *dsl.Tmpl { return prepareFindByField(field, map[string]interface{}{"version": true}) } -func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, 
name string, v interface{}) (agent model.Agent, err error) { - res, err := SearchWithOneParam(ctx, bulker, tmpl, FleetAgents, name, v) +func prepareOfflineAgentsByPolicyID() *dsl.Tmpl { + tmpl := dsl.NewTmpl() + + root := dsl.NewRoot() + filter := root.Query().Bool().Filter() + filter.Term(FieldActive, true, nil) + filter.Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) + filter.Range(FieldLastCheckin, dsl.WithRangeLTE(tmpl.Bind(FieldLastCheckin))) + + tmpl.MustResolve(root) + return tmpl +} + +func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name string, v interface{}, opt ...Option) (agent model.Agent, err error) { + o := newOption(FleetAgents, opt...) + res, err := SearchWithOneParam(ctx, bulker, tmpl, o.indexName, name, v) if err != nil { return } @@ -50,14 +63,26 @@ func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name strin return agent, err } -func IndexAgent(ctx context.Context, bulker bulk.Bulk, agent model.Agent) error { - if agent.Id == "" { - agent.Id = uuid.Must(uuid.NewV4()).String() - } - body, err := json.Marshal(agent) +func FindOfflineAgents(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, opt ...Option) ([]model.Agent, error) { + o := newOption(FleetAgents, opt...) 
+ past := time.Now().UTC().Add(-unenrollTimeout).Format(time.RFC3339) + res, err := Search(ctx, bulker, QueryOfflineAgentsByPolicyID, o.indexName, map[string]interface{}{ + FieldPolicyId: policyId, + FieldLastCheckin: past, + }) if err != nil { - return err + return nil, err + } + + if len(res.Hits) == 0 { + return nil, nil + } + + agents := make([]model.Agent, len(res.Hits)) + for i, hit := range res.Hits { + if err := hit.Unmarshal(&agents[i]); err != nil { + return nil, err + } } - _, err = bulker.Index(ctx, FleetAgents, agent.Id, body, bulk.WithRefresh()) - return err + return agents, nil } diff --git a/internal/pkg/dl/agent_integration_test.go b/internal/pkg/dl/agent_integration_test.go new file mode 100644 index 000000000..df9d637df --- /dev/null +++ b/internal/pkg/dl/agent_integration_test.go @@ -0,0 +1,111 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build integration +// +build integration + +package dl + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" +) + +func TestFindOfflineAgents(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingAgent) + + now := time.Now().UTC() + nowStr := now.Format(time.RFC3339) + + policyID := uuid.Must(uuid.NewV4()).String() + dayOld := now.Add(-24 * time.Hour).Format(time.RFC3339) + dayOldID := uuid.Must(uuid.NewV4()).String() + body, err := json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: dayOld, + LastCheckinStatus: "", + UpdatedAt: dayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, dayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + twoDayOld := now.Add(-48 * time.Hour).Format(time.RFC3339) + twoDayOldID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: twoDayOld, + LastCheckinStatus: "", + UpdatedAt: twoDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, twoDayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + // not active (should not be included) + notActiveID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: false, + LastCheckin: twoDayOld, + LastCheckinStatus: "", + UpdatedAt: twoDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, notActiveID, body, 
bulk.WithRefresh()) + require.NoError(t, err) + + threeDayOld := now.Add(-48 * time.Hour).Format(time.RFC3339) + threeDayOldID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: threeDayOld, + LastCheckinStatus: "", + UpdatedAt: threeDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, threeDayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + // add agent on a different policy; should not be returned (3 days old) + otherPolicyID := uuid.Must(uuid.NewV4()).String() + otherID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: otherPolicyID, + Active: true, + LastCheckin: threeDayOld, + LastCheckinStatus: "", + UpdatedAt: threeDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, otherID, body, bulk.WithRefresh()) + require.NoError(t, err) + + agents, err := FindOfflineAgents(ctx, bulker, policyID, 36*time.Hour, WithIndexName(index)) + require.NoError(t, err) + require.Len(t, agents, 2) + assert.EqualValues(t, []string{twoDayOldID, threeDayOldID}, []string{agents[0].Id, agents[1].Id}) +} diff --git a/internal/pkg/dl/artifact.go b/internal/pkg/dl/artifact.go new file mode 100644 index 000000000..c7e3f5090 --- /dev/null +++ b/internal/pkg/dl/artifact.go @@ -0,0 +1,72 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package dl + +import ( + "context" + "encoding/json" + + "github.com/rs/zerolog/log" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dsl" + "github.com/elastic/fleet-server/v7/internal/pkg/model" +) + +var ( + QueryArtifactTmpl = prepareQueryArtifact() +) + +func prepareQueryArtifact() *dsl.Tmpl { + root := dsl.NewRoot() + tmpl := dsl.NewTmpl() + + must := root.Query().Bool().Must() + must.Term(FieldDecodedSha256, tmpl.Bind(FieldDecodedSha256), nil) + must.Term(FieldIdentifier, tmpl.Bind(FieldIdentifier), nil) + tmpl.MustResolve(root) + return tmpl +} + +func FindArtifact(ctx context.Context, bulker bulk.Bulk, ident, sha2 string) (*model.Artifact, error) { + + params := map[string]interface{}{ + FieldDecodedSha256: sha2, + FieldIdentifier: ident, + } + + res, err := Search( + ctx, + bulker, + QueryArtifactTmpl, + FleetArtifacts, + params, + ) + + if err != nil { + return nil, err + } + + if len(res.Hits) == 0 { + return nil, ErrNotFound + } + + if len(res.Hits) > 1 { + log.Warn(). + Str("ident", ident). + Str("sha2", sha2). + Int("cnt", len(res.Hits)). + Str("used", res.Hits[0].Id). + Msg("Multiple HITS on artifact query. 
Using the first returned.") + } + + // deserialize + var artifact model.Artifact + if err = json.Unmarshal(res.Hits[0].Source, &artifact); err != nil { + return nil, err + } + + return &artifact, nil +} diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index aa5cab75e..882a09d37 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -4,11 +4,14 @@ package dl +import "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + // Indices names const ( FleetActions = ".fleet-actions" FleetActionsResults = ".fleet-actions-results" FleetAgents = ".fleet-agents" + FleetArtifacts = ".fleet-artifacts" FleetEnrollmentAPIKeys = ".fleet-enrollment-api-keys" FleetPolicies = ".fleet-policies" FleetPoliciesLeader = ".fleet-policies-leader" @@ -24,24 +27,34 @@ const ( FieldMaxSeqNo = "max_seq_no" FieldActionSeqNo = "action_seq_no" - FieldActionId = "action_id" - FieldPolicyId = "policy_id" - FieldRevisionIdx = "revision_idx" - FieldCoordinatorIdx = "coordinator_idx" - FieldPolicyRevisionIdx = "policy_revision_idx" - FieldPolicyCoordinatorIdx = "policy_coordinator_idx" - - FieldUpdatedAt = "updated_at" - FieldUnenrolledAt = "unenrolled_at" -) - -// Public constants -const ( - UndefinedSeqNo = -1 + FieldActionId = "action_id" + FieldPolicyId = "policy_id" + FieldRevisionIdx = "revision_idx" + FieldCoordinatorIdx = "coordinator_idx" + FieldLastCheckin = "last_checkin" + FieldLastCheckinStatus = "last_checkin_status" + FieldLocalMetadata = "local_metadata" + FieldPolicyRevisionIdx = "policy_revision_idx" + FieldPolicyCoordinatorIdx = "policy_coordinator_idx" + FieldDefaultApiKey = "default_api_key" + FieldDefaultApiKeyId = "default_api_key_id" + FieldPolicyOutputPermissionsHash = "policy_output_permissions_hash" + FieldUnenrolledReason = "unenrolled_reason" + FieldAgentVersion = "version" + FieldAgent = "agent" + + FieldActive = "active" + FieldUpdatedAt = "updated_at" + FieldUnenrolledAt = "unenrolled_at" + FieldUpgradedAt = 
"upgraded_at" + FieldUpgradeStartedAt = "upgrade_started_at" + + FieldDecodedSha256 = "decoded_sha256" + FieldIdentifier = "identifier" ) // Private constants const ( - defaultSeqNo = UndefinedSeqNo + defaultSeqNo = sqn.UndefinedSeqNo seqNoPrimaryTerm = "seq_no_primary_term" ) diff --git a/internal/pkg/dl/enrollment_api_key.go b/internal/pkg/dl/enrollment_api_key.go index 1ac18e72b..176c51d1d 100644 --- a/internal/pkg/dl/enrollment_api_key.go +++ b/internal/pkg/dl/enrollment_api_key.go @@ -6,6 +6,7 @@ package dl import ( "context" + "encoding/json" "fmt" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" @@ -18,39 +19,36 @@ const ( ) var ( - QueryEnrollmentAPIKeyByID = prepareFindEnrollmentAPIKeyByID() + QueryEnrollmentAPIKeyByID = prepareFindEnrollmentAPIKeyByID() + QueryEnrollmentAPIKeyByPolicyID = prepareFindEnrollmentAPIKeyByPolicyID() ) -// RenderAllEnrollmentAPIKeysQuery render all enrollment api keys query. For migration only. -func RenderAllEnrollmentAPIKeysQuery(size uint64) ([]byte, error) { +func prepareFindEnrollmentAPIKeyByID() *dsl.Tmpl { tmpl := dsl.NewTmpl() root := dsl.NewRoot() - root.Size(size) + root.Query().Bool().Filter().Term(FieldApiKeyID, tmpl.Bind(FieldApiKeyID), nil) - err := tmpl.Resolve(root) - if err != nil { - return nil, err - } - return tmpl.Render(nil) + tmpl.MustResolve(root) + return tmpl } -func prepareFindEnrollmentAPIKeyByID() *dsl.Tmpl { +func prepareFindEnrollmentAPIKeyByPolicyID() *dsl.Tmpl { tmpl := dsl.NewTmpl() root := dsl.NewRoot() - root.Query().Bool().Filter().Term(FieldApiKeyID, tmpl.Bind(FieldApiKeyID), nil) + root.Query().Bool().Filter().Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) tmpl.MustResolve(root) return tmpl } -func FindEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, id string) (rec model.EnrollmentApiKey, err error) { - return findEnrollmentAPIKey(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, id) +func FindEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, tmpl 
*dsl.Tmpl, field string, id string) (rec model.EnrollmentApiKey, err error) { + return findEnrollmentAPIKey(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, field, id) } -func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, id string) (rec model.EnrollmentApiKey, err error) { - res, err := SearchWithOneParam(ctx, bulker, tmpl, index, FieldApiKeyID, id) +func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, field string, id string) (rec model.EnrollmentApiKey, err error) { + res, err := SearchWithOneParam(ctx, bulker, tmpl, index, field, id) if err != nil { return } @@ -63,3 +61,32 @@ func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, t err = res.Hits[0].Unmarshal(&rec) return rec, err } + +func FindEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, field string, id string) ([]model.EnrollmentApiKey, error) { + return findEnrollmentAPIKeys(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, field, id) +} + +func findEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, field string, id string) ([]model.EnrollmentApiKey, error) { + res, err := SearchWithOneParam(ctx, bulker, tmpl, index, field, id) + if err != nil { + return nil, err + } + + recs := make([]model.EnrollmentApiKey, len(res.Hits)) + for i := 0; i < len(res.Hits); i++ { + if err := res.Hits[i].Unmarshal(&recs[i]); err != nil { + return nil, err + } + } + return recs, nil +} + +// CreateEnrollmentAPIKey creates a new enrollment API key +func CreateEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, key model.EnrollmentApiKey, opt ...Option) (string, error) { + o := newOption(FleetEnrollmentAPIKeys, opt...) 
+ data, err := json.Marshal(&key) + if err != nil { + return "", err + } + return bulker.Create(ctx, o.indexName, "", data, bulk.WithRefresh()) +} diff --git a/internal/pkg/dl/enrollment_api_key_integration_test.go b/internal/pkg/dl/enrollment_api_key_integration_test.go index ba5420b34..9c2194ea7 100644 --- a/internal/pkg/dl/enrollment_api_key_integration_test.go +++ b/internal/pkg/dl/enrollment_api_key_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl @@ -22,7 +23,7 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -func createRandomEnrollmentAPIKey() model.EnrollmentApiKey { +func createRandomEnrollmentAPIKey(policyID string) model.EnrollmentApiKey { now := time.Now().UTC() return model.EnrollmentApiKey{ ESDocument: model.ESDocument{ @@ -33,13 +34,13 @@ func createRandomEnrollmentAPIKey() model.EnrollmentApiKey { ApiKeyId: xid.New().String(), CreatedAt: now.Format(time.RFC3339), Name: "Default (db3f8318-05f0-4625-a808-9deddb0112b5)", - PolicyId: uuid.Must(uuid.NewV4()).String(), + PolicyId: policyID, } } -func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string) (rec model.EnrollmentApiKey, err error) { - rec = createRandomEnrollmentAPIKey() +func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, policyID string) (rec model.EnrollmentApiKey, err error) { + rec = createRandomEnrollmentAPIKey(policyID) body, err := json.Marshal(rec) if err != nil { @@ -52,22 +53,16 @@ func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index st return rec, err } -func setupEnrollmentAPIKeys(ctx context.Context, t *testing.T) (string, bulk.Bulk, model.EnrollmentApiKey) { +func TestSearchEnrollmentAPIKeyByID(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) 
+ defer cn() + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingEnrollmentApiKey) - rec, err := storeRandomEnrollmentAPIKey(ctx, bulker, index) + rec, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, uuid.Must(uuid.NewV4()).String()) if err != nil { t.Fatal(err) } - - return index, bulker, rec -} - -func TestSearchEnrollmentAPIKey(t *testing.T) { - ctx, cn := context.WithCancel(context.Background()) - defer cn() - - index, bulker, rec := setupEnrollmentAPIKeys(ctx, t) - foundRec, err := findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, rec.ApiKeyId) + foundRec, err := findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, FieldApiKeyID, rec.ApiKeyId) if err != nil { t.Fatal(err) } @@ -77,7 +72,7 @@ func TestSearchEnrollmentAPIKey(t *testing.T) { t.Fatal(diff) } - foundRec, err = findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, xid.New().String()) + foundRec, err = findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, FieldApiKeyID, xid.New().String()) if err == nil { t.Fatal("expected error") } else { @@ -87,3 +82,34 @@ func TestSearchEnrollmentAPIKey(t *testing.T) { } } } + +func TestSearchEnrollmentAPIKeyByPolicyID(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingEnrollmentApiKey) + + policyID := uuid.Must(uuid.NewV4()).String() + rec1, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, policyID) + if err != nil { + t.Fatal(err) + } + rec2, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, policyID) + if err != nil { + t.Fatal(err) + } + _, err = storeRandomEnrollmentAPIKey(ctx, bulker, index, uuid.Must(uuid.NewV4()).String()) + if err != nil { + t.Fatal(err) + } + + foundRecs, err := findEnrollmentAPIKeys(ctx, bulker, index, QueryEnrollmentAPIKeyByPolicyID, FieldPolicyId, policyID) + if err != nil { + t.Fatal(err) + } + + diff := cmp.Diff([]model.EnrollmentApiKey{rec1, 
rec2}, foundRecs) + if diff != "" { + t.Fatal(diff) + } +} diff --git a/internal/pkg/dl/migration.go b/internal/pkg/dl/migration.go new file mode 100644 index 000000000..4beb26741 --- /dev/null +++ b/internal/pkg/dl/migration.go @@ -0,0 +1,136 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package dl + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "net/http" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dsl" + + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" +) + +func Migrate(ctx context.Context, bulker bulk.Bulk) error { + return migrateAgentMetadata(ctx, bulker) +} + +// FleetServer 7.15 added a new *AgentMetadata field to the Agent record. +// This field was populated in new enrollments in 7.15 and later; however, the +// change was not backported to support 7.14. The security team is reliant on the +// existence of this field in 7.16, so the following migration was added to +// support upgrade from 7.14. +// +// It is currently safe to run this in the background; albeit with some +// concern on conflicts. The conflict risk exists regardless as N Fleet Servers +// can be run in parallel at the same time. +// +// As the update only occurs once, the 99.9% case is a noop. 
+func migrateAgentMetadata(ctx context.Context, bulker bulk.Bulk) error { + + root := dsl.NewRoot() + root.Query().Bool().MustNot().Exists("agent.id") + + painless := "ctx._source.agent = [:]; ctx._source.agent.id = ctx._id;" + root.Param("script", painless) + + body, err := root.MarshalJSON() + if err != nil { + return err + } + +LOOP: + for { + nConflicts, err := updateAgentMetadata(ctx, bulker, body) + if err != nil { + return err + } + if nConflicts == 0 { + break LOOP + } + + time.Sleep(time.Second) + } + + return nil +} + +func updateAgentMetadata(ctx context.Context, bulker bulk.Bulk, body []byte) (int, error) { + start := time.Now() + + client := bulker.Client() + + reader := bytes.NewReader(body) + + opts := []func(*esapi.UpdateByQueryRequest){ + client.UpdateByQuery.WithBody(reader), + client.UpdateByQuery.WithContext(ctx), + client.UpdateByQuery.WithRefresh(true), + client.UpdateByQuery.WithConflicts("proceed"), + } + + res, err := client.UpdateByQuery([]string{FleetAgents}, opts...) + + if err != nil { + return 0, err + } + + if res.IsError() { + if res.StatusCode == http.StatusNotFound { + // Ignore index not created yet; nothing to upgrade + return 0, nil + } + + return 0, fmt.Errorf("Migrate UpdateByQuery %s", res.String()) + } + + resp := struct { + Took int `json:"took"` + TimedOut bool `json:"timed_out"` + Total int `json:"total"` + Updated int `json:"updated"` + Deleted int `json:"deleted"` + Batches int `json:"batches"` + VersionConflicts int `json:"version_conflicts"` + Noops int `json:"noops"` + Retries struct { + Bulk int `json:"bulk"` + Search int `json:"search"` + } `json:"retries"` + Failures []json.RawMessage `json:"failures"` + }{} + + decoder := json.NewDecoder(res.Body) + if err := decoder.Decode(&resp); err != nil { + return 0, errors.Wrap(err, "decode UpdateByQuery response") + } + + log.Info(). + Int("took", resp.Took). + Bool("timed_out", resp.TimedOut). + Int("total", resp.Total). + Int("updated", resp.Updated). 
+ Int("deleted", resp.Deleted). + Int("batches", resp.Batches). + Int("version_conflicts", resp.VersionConflicts). + Int("noops", resp.Noops). + Int("retries.bulk", resp.Retries.Bulk). + Int("retries.search", resp.Retries.Search). + Dur("rtt", time.Since(start)). + Msg("migrate agent records response") + + for _, fail := range resp.Failures { + log.Error().RawJSON("failure", fail).Msg("migration failure") + } + + return resp.VersionConflicts, err +} diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index f5277e731..9a65eddae 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -8,26 +8,23 @@ import ( "context" "encoding/json" "errors" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "sync" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" ) var ( - tmplQueryLatestPolicies []byte - initQueryLatestPoliciesOnce sync.Once + tmplQueryLatestPolicies = prepareQueryLatestPolicies() + ErrMissingAggregations = errors.New("missing expected aggregation result") ) -var ErrPolicyLeaderNotFound = errors.New("policy has no leader") -var ErrMissingAggregations = errors.New("missing expected aggregation result") - func prepareQueryLatestPolicies() []byte { root := dsl.NewRoot() root.Size(0) policyId := root.Aggs().Agg(FieldPolicyId) - policyId.Terms("field", FieldPolicyId, nil) + policyId.Terms("field", FieldPolicyId, nil).Size(10000) revisionIdx := policyId.Aggs().Agg(FieldRevisionIdx).TopHits() revisionIdx.Size(1) rSort := revisionIdx.Sort() @@ -38,12 +35,8 @@ func prepareQueryLatestPolicies() []byte { // QueryLatestPolices gets the latest revision for a policy func QueryLatestPolicies(ctx context.Context, bulker bulk.Bulk, opt ...Option) ([]model.Policy, error) { - initQueryLatestPoliciesOnce.Do(func() { - tmplQueryLatestPolicies = prepareQueryLatestPolicies() - }) - o := newOption(FleetPolicies, opt...) 
- res, err := bulker.Search(ctx, []string{o.indexName}, tmplQueryLatestPolicies) + res, err := bulker.Search(ctx, o.indexName, tmplQueryLatestPolicies) if err != nil { return nil, err } @@ -77,5 +70,5 @@ func CreatePolicy(ctx context.Context, bulker bulk.Bulk, policy model.Policy, op if err != nil { return "", err } - return bulker.Create(ctx, o.indexName, "", data) + return bulker.Create(ctx, o.indexName, "", data, bulk.WithRefresh()) } diff --git a/internal/pkg/dl/policies_integration_test.go b/internal/pkg/dl/policies_integration_test.go index 742feb629..66027c794 100644 --- a/internal/pkg/dl/policies_integration_test.go +++ b/internal/pkg/dl/policies_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/policies_leader.go b/internal/pkg/dl/policies_leader.go index 57e3d650e..0d23a3233 100644 --- a/internal/pkg/dl/policies_leader.go +++ b/internal/pkg/dl/policies_leader.go @@ -7,12 +7,15 @@ package dl import ( "context" "encoding/json" + "errors" + "sync" + "time" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "sync" - "time" + "github.com/rs/zerolog/log" ) var ( @@ -46,8 +49,12 @@ func SearchPolicyLeaders(ctx context.Context, bulker bulk.Bulk, ids []string, op if err != nil { return } - res, err := bulker.Search(ctx, []string{o.indexName}, data) + res, err := bulker.Search(ctx, o.indexName, data) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + log.Debug().Str("index", o.indexName).Msg(es.ErrIndexNotFound.Error()) + err = nil + } return } diff --git a/internal/pkg/dl/policies_leader_integration_test.go 
b/internal/pkg/dl/policies_leader_integration_test.go index 3c026162e..e6308c725 100644 --- a/internal/pkg/dl/policies_leader_integration_test.go +++ b/internal/pkg/dl/policies_leader_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/search.go b/internal/pkg/dl/search.go index 216c3e96f..0672b145a 100644 --- a/internal/pkg/dl/search.go +++ b/internal/pkg/dl/search.go @@ -18,7 +18,7 @@ func Search(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, return nil, err } - res, err := bulker.Search(ctx, []string{index}, query) + res, err := bulker.Search(ctx, index, query) if err != nil { return nil, err } @@ -31,7 +31,7 @@ func SearchWithOneParam(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, i if err != nil { return nil, err } - res, err := bulker.Search(ctx, []string{index}, query) + res, err := bulker.Search(ctx, index, query) if err != nil { return nil, err } diff --git a/internal/pkg/dl/servers_integration_test.go b/internal/pkg/dl/servers_integration_test.go index 911e957c2..aba768efe 100644 --- a/internal/pkg/dl/servers_integration_test.go +++ b/internal/pkg/dl/servers_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dsl/readme.txt b/internal/pkg/dsl/readme.txt deleted file mode 100644 index c42c76702..000000000 --- a/internal/pkg/dsl/readme.txt +++ /dev/null @@ -1,6 +0,0 @@ -readme.txt - - -Very basic elastic DSL query builder; grossly incomplete; probably broken. - -Only the parts that were needed were fleshed out. Needs work. 
diff --git a/internal/pkg/dsl/term.go b/internal/pkg/dsl/term.go index 8a13b80e5..7da88b08a 100644 --- a/internal/pkg/dsl/term.go +++ b/internal/pkg/dsl/term.go @@ -4,7 +4,7 @@ package dsl -func (n *Node) Term(field string, value interface{}, boost *float64) { +func (n *Node) Term(field string, value interface{}, boost *float64) *Node { childNode := n.appendOrSetChildNode(kKeywordTerm) leaf := value @@ -22,9 +22,10 @@ func (n *Node) Term(field string, value interface{}, boost *float64) { childNode.nodeMap = nodeMapT{field: &Node{ leaf: leaf, }} + return childNode } -func (n *Node) Terms(field string, value interface{}, boost *float64) { +func (n *Node) Terms(field string, value interface{}, boost *float64) *Node { childNode := n.appendOrSetChildNode(kKeywordTerms) childNode.nodeMap = nodeMapT{ @@ -34,4 +35,5 @@ func (n *Node) Terms(field string, value interface{}, boost *float64) { if boost != nil { childNode.nodeMap[kKeywordBoost] = &Node{leaf: *boost} } + return childNode } diff --git a/internal/pkg/es/client.go b/internal/pkg/es/client.go index b2159fd96..70db0f1c3 100644 --- a/internal/pkg/es/client.go +++ b/internal/pkg/es/client.go @@ -8,15 +8,20 @@ import ( "context" "encoding/json" "fmt" + "net/http" + "runtime" + "github.com/elastic/fleet-server/v7/internal/pkg/build" "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) -func NewClient(ctx context.Context, cfg *config.Config) (*elasticsearch.Client, error) { - escfg, err := cfg.Output.Elasticsearch.ToESConfig() +type ConfigOption func(config elasticsearch.Config) + +func NewClient(ctx context.Context, cfg *config.Config, longPoll bool, opts ...ConfigOption) (*elasticsearch.Client, error) { + escfg, err := cfg.Output.Elasticsearch.ToESConfig(longPoll) if err != nil { return nil, err } @@ -24,32 +29,59 @@ func NewClient(ctx context.Context, cfg *config.Config) 
(*elasticsearch.Client, user := cfg.Output.Elasticsearch.Username mcph := cfg.Output.Elasticsearch.MaxConnPerHost - log.Debug(). - Strs("addr", addr). - Str("user", user). - Int("maxConnsPersHost", mcph). - Msg("init es") + // Apply configuration options + for _, opt := range opts { + opt(escfg) + } + + zlog := log.With(). + Strs("cluster.addr", addr). + Str("cluster.user", user). + Int("cluster.maxConnsPersHost", mcph). + Logger() + + zlog.Debug().Msg("init es") es, err := elasticsearch.NewClient(escfg) if err != nil { + zlog.Error().Err(err).Msg("fail elasticsearch init") return nil, err } // Validate connection resp, err := info(ctx, es) if err != nil { + zlog.Error().Err(err).Msg("fail elasticsearch info") return nil, err } - log.Info(). - Str("name", resp.ClusterName). - Str("uuid", resp.ClusterUUID). - Str("vers", resp.Version.Number). - Msg("Cluster Info") + zlog.Info(). + Str("cluster.name", resp.ClusterName). + Str("cluster.uuid", resp.ClusterUUID). + Str("cluster.version", resp.Version.Number). 
+ Msg("elasticsearch cluster info") return es, nil } +func WithUserAgent(name string, bi build.Info) func(config elasticsearch.Config) { + return func(config elasticsearch.Config) { + ua := userAgent(name, bi) + // Set User-Agent header + if config.Header == nil { + config.Header = http.Header{} + } + config.Header.Set("User-Agent", ua) + } +} + +func userAgent(name string, bi build.Info) string { + return fmt.Sprintf("Elastic-%s/%s (%s; %s; %s; %s)", + name, + bi.Version, runtime.GOOS, runtime.GOARCH, + bi.Commit, bi.BuildTime) +} + type InfoResponse struct { ClusterName string `json:"cluster_name"` ClusterUUID string `json:"cluster_uuid"` diff --git a/internal/pkg/es/delete.go b/internal/pkg/es/delete.go index 720bc9f7d..708c5df0a 100644 --- a/internal/pkg/es/delete.go +++ b/internal/pkg/es/delete.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" ) func DeleteIndices(ctx context.Context, es *elasticsearch.Client, indices []string) error { @@ -26,7 +26,7 @@ func DeleteIndices(ctx context.Context, es *elasticsearch.Client, indices []stri return err } if !ares.Acknowledged { - err = TranslateError(res.StatusCode, ares.Error) + err = TranslateError(res.StatusCode, &ares.Error) } return err diff --git a/internal/pkg/es/error.go b/internal/pkg/es/error.go index 4966c88f6..7e39c5ce3 100644 --- a/internal/pkg/es/error.go +++ b/internal/pkg/es/error.go @@ -9,6 +9,8 @@ import ( "fmt" ) +// TODO: Why do we have both ErrElastic and ErrorT? Very strange. 
+ type ErrElastic struct { Status int Type string @@ -22,7 +24,10 @@ type ErrElastic struct { func (e *ErrElastic) Unwrap() error { if e.Type == "index_not_found_exception" { return ErrIndexNotFound + } else if e.Type == "timeout_exception" { + return ErrTimeout } + return nil } @@ -35,12 +40,19 @@ var ( ErrElasticNotFound = errors.New("elastic not found") ErrInvalidBody = errors.New("invalid body") ErrIndexNotFound = errors.New("index not found") + ErrTimeout = errors.New("timeout") + ErrNotFound = errors.New("not found") ) -func TranslateError(status int, e ErrorT) error { +func TranslateError(status int, e *ErrorT) error { if status == 200 || status == 201 { return nil } + if e == nil { + return &ErrElastic{ + Status: status, + } + } var err error switch e.Type { diff --git a/internal/pkg/es/fleet_global_checkpoints.go b/internal/pkg/es/fleet_global_checkpoints.go new file mode 100644 index 000000000..40133d606 --- /dev/null +++ b/internal/pkg/es/fleet_global_checkpoints.go @@ -0,0 +1,175 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package es + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/elastic/go-elasticsearch/v7/esapi" +) + +// The wrapper for the new _fleet global_checkpoints that is not the part of the +// standard client library at the moment. +// The shape mimics the official client API and should be easy drop-in replacement in the future. +// This should be replaced the official client library when/if the new API makes it in. 
+ +func NewGlobalCheckpointsRequest(t esapi.Transport) GlobalCheckpoints { + return func(o ...func(*GlobalCheckpointsRequest)) (*esapi.Response, error) { + var r = GlobalCheckpointsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// Copied from the official client +func formatDuration(d time.Duration) string { + if d < time.Millisecond { + return strconv.FormatInt(int64(d), 10) + "nanos" + } + return strconv.FormatInt(int64(d)/int64(time.Millisecond), 10) + "ms" +} + +type GlobalCheckpoints func(o ...func(*GlobalCheckpointsRequest)) (*esapi.Response, error) + +// GlobalCheckpointsRequest configures the _fleet API global_checkpoints request. +// +type GlobalCheckpointsRequest struct { + ctx context.Context + + Index string + WaitForAdvance *bool + WaitForIndex *bool + Checkpoints []int64 + Timeout time.Duration + + Header http.Header +} + +// Do executes the request and returns response or error. +// +func (r GlobalCheckpointsRequest) Do(ctx context.Context, transport esapi.Transport) (*esapi.Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(1 + len(r.Index) + len("/_fleet/global_checkpoints")) + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(r.Index) + } + path.WriteString("/_fleet/global_checkpoints") + + params = make(map[string]string) + + if r.WaitForAdvance != nil { + params["wait_for_advance"] = strconv.FormatBool(*r.WaitForAdvance) + } + + if r.WaitForIndex != nil { + params["wait_for_index"] = strconv.FormatBool(*r.WaitForIndex) + } + + if len(r.Checkpoints) > 0 { + seqNo := sqn.SeqNo(r.Checkpoints) + params["checkpoints"] = seqNo.String() + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + req, err := http.NewRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = 
q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := esapi.Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +// +func (f GlobalCheckpoints) WithContext(v context.Context) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.ctx = v + } +} + +// WithIndex - an index name +// +func (f GlobalCheckpoints) WithIndex(index string) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Index = index + } +} + +func (f GlobalCheckpoints) WithWaitForAdvance(v bool) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.WaitForAdvance = &v + } +} + +func (f GlobalCheckpoints) WithWaitForIndex(v bool) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.WaitForIndex = &v + } +} + +func (f GlobalCheckpoints) WithCheckpoints(checkpoints []int64) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Checkpoints = checkpoints + } +} + +func (f GlobalCheckpoints) WithTimeout(to time.Duration) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Timeout = to + } +} diff --git a/internal/pkg/es/holes.go b/internal/pkg/es/holes.go new file mode 100644 index 000000000..f74df08c2 --- /dev/null +++ b/internal/pkg/es/holes.go @@ -0,0 +1,37 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package es + +import "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + +func HasHoles(checkpoint sqn.SeqNo, hits []HitT) bool { + sz := len(hits) + if sz == 0 { + return false + } + + // Check if the hole is in the beginning of hits + seqNo := checkpoint.Value() + if seqNo != sqn.UndefinedSeqNo && (hits[0].SeqNo-seqNo) > 1 { + return true + } + + // No holes in the beginning, check if size <= 1 then there is no holes + if sz <= 1 { + return false + } + + // Set initial seqNo value from the last hit in the array + seqNo = hits[sz-1].SeqNo + + // Iterate from the end since that's where it more likely to have holes + for i := sz - 2; i >= 0; i-- { + if (seqNo - hits[i].SeqNo) > 1 { + return true + } + seqNo = hits[i].SeqNo + } + return false +} diff --git a/internal/pkg/es/holes_test.go b/internal/pkg/es/holes_test.go new file mode 100644 index 000000000..ad261cae7 --- /dev/null +++ b/internal/pkg/es/holes_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build !integration +// +build !integration + +package es + +import ( + "testing" + + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/google/go-cmp/cmp" +) + +// Sanity test of interal check if hits lits has holes +func TestHashHoles(t *testing.T) { + + tests := []struct { + Name string + SeqNo sqn.SeqNo + Hits []HitT + HasHoles bool + }{ + { + Name: "nil", + Hits: genHitsSequence(nil), + }, + { + Name: "empty", + Hits: genHitsSequence([]int64{}), + }, + { + Name: "one", + SeqNo: sqn.SeqNo([]int64{2}), + Hits: genHitsSequence([]int64{3}), + }, + { + Name: "two", + Hits: genHitsSequence([]int64{2, 3}), + }, + { + Name: "two with hole", + Hits: genHitsSequence([]int64{2, 4}), + HasHoles: true, + }, + { + Name: "holed", + Hits: genHitsSequence([]int64{2, 3, 4, 6}), + HasHoles: true, + }, + { + Name: "hole in the beginning", + SeqNo: sqn.SeqNo([]int64{1}), + Hits: genHitsSequence([]int64{3, 4, 5}), + HasHoles: true, + }, + { + Name: "four no holes", + SeqNo: sqn.SeqNo([]int64{1}), + Hits: genHitsSequence([]int64{2, 3, 4, 5}), + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + diff := cmp.Diff(tc.HasHoles, HasHoles(tc.SeqNo, tc.Hits)) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func genHitsSequence(seq []int64) []HitT { + if seq == nil { + return nil + } + + hits := make([]HitT, 0, len(seq)) + for _, s := range seq { + hits = append(hits, HitT{ + SeqNo: s, + }) + } + return hits +} diff --git a/internal/pkg/es/info.go b/internal/pkg/es/info.go new file mode 100644 index 000000000..7dad0c7ff --- /dev/null +++ b/internal/pkg/es/info.go @@ -0,0 +1,50 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package es + +import ( + "context" + "encoding/json" + "strings" + + "github.com/elastic/go-elasticsearch/v7" +) + +type versionInfo struct { + Number string `json:"number"` +} + +type infoResponse struct { + Version versionInfo `json:"version"` + Error ErrorT `json:"error,omitempty"` +} + +func FetchESVersion(ctx context.Context, esCli *elasticsearch.Client) (version string, err error) { + res, err := esCli.Info( + esCli.Info.WithContext(ctx), + ) + + if err != nil { + return + } + defer res.Body.Close() + + var sres infoResponse + + err = json.NewDecoder(res.Body).Decode(&sres) + if err != nil { + return + } + + // Check error + err = TranslateError(res.StatusCode, &sres.Error) + if err != nil { + return + } + + verStr := strings.TrimSpace(strings.TrimSuffix(strings.ToLower(sres.Version.Number), "-snapshot")) + + return verStr, nil +} diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index dadae8870..deb9a52d9 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -24,27 +24,58 @@ const ( "expiration": { "type": "date" }, - "input_id": { + "input_type": { "type": "keyword" }, + "timeout": { + "type": "integer" + }, "@timestamp": { "type": "date" }, "type": { "type": "keyword" + }, + "user_id": { + "type": "keyword" } } }` + // ActionData The opaque payload. + MappingActionData = `{ + "properties": { + + } +}` + + // ActionResponse The custom action response payload. 
+ MappingActionResponse = `{ + "properties": { + + } +}` + // ActionResult An Elastic Agent action results MappingActionResult = `{ "properties": { + "action_data": { + "enabled" : false, + "type": "object" + }, "action_id": { "type": "keyword" }, + "action_response": { + "enabled" : false, + "type": "object" + }, "agent_id": { "type": "keyword" }, + "completed_at": { + "type": "date" + }, "data": { "enabled" : false, "type": "object" @@ -52,6 +83,9 @@ const ( "error": { "type": "keyword" }, + "started_at": { + "type": "date" + }, "@timestamp": { "type": "date" } @@ -111,6 +145,9 @@ const ( "policy_id": { "type": "keyword" }, + "policy_output_permissions_hash": { + "type": "keyword" + }, "policy_revision_idx": { "type": "integer" }, @@ -123,6 +160,9 @@ const ( "unenrolled_at": { "type": "date" }, + "unenrolled_reason": { + "type": "keyword" + }, "unenrollment_started_at": { "type": "date" }, @@ -154,6 +194,50 @@ const ( } }` + // Artifact An artifact served by Fleet + MappingArtifact = `{ + "properties": { + "body": { + "enabled" : false, + "type": "object" + }, + "compression_algorithm": { + "type": "keyword" + }, + "created": { + "type": "date" + }, + "decoded_sha256": { + "type": "keyword" + }, + "decoded_size": { + "type": "integer" + }, + "encoded_sha256": { + "type": "keyword" + }, + "encoded_size": { + "type": "integer" + }, + "encryption_algorithm": { + "type": "keyword" + }, + "identifier": { + "type": "keyword" + }, + "package_name": { + "type": "keyword" + } + } +}` + + // Body Encoded artifact data + MappingBody = `{ + "properties": { + + } +}` + // Data The opaque payload. MappingData = `{ "properties": { @@ -237,6 +321,9 @@ const ( }, "@timestamp": { "type": "date" + }, + "unenroll_timeout": { + "type": "integer" } } }` diff --git a/internal/pkg/es/refresh.go b/internal/pkg/es/refresh.go new file mode 100644 index 000000000..b138aeb5b --- /dev/null +++ b/internal/pkg/es/refresh.go @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package es + +import ( + "context" + "encoding/json" + "errors" + + "github.com/elastic/go-elasticsearch/v7" +) + +// Refresh refreshes index. This is temporary code +// TODO: Remove this when the refresh is properly implemented on Eleasticsearch side +// The issue for "refresh" falls under phase 2 of https://github.com/elastic/elasticsearch/issues/71449. +// Once the phase 2 is complete we can remove the refreshes from fleet-server. +func Refresh(ctx context.Context, esCli *elasticsearch.Client, index string) error { + res, err := esCli.Indices.Refresh( + esCli.Indices.Refresh.WithContext(ctx), + esCli.Indices.Refresh.WithIndex(index), + ) + if err != nil { + return err + } + defer res.Body.Close() + var esres Response + err = json.NewDecoder(res.Body).Decode(&esres) + if err != nil { + return err + } + + if res.IsError() { + err = TranslateError(res.StatusCode, &esres.Error) + } + + if err != nil { + if errors.Is(err, ErrIndexNotFound) { + return nil + } + return err + } + return nil +} diff --git a/internal/pkg/es/result_test.go b/internal/pkg/es/result_test.go index 8484613fc..83808f2ad 100644 --- a/internal/pkg/es/result_test.go +++ b/internal/pkg/es/result_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package es diff --git a/internal/pkg/esboot/bootstrap.go b/internal/pkg/esboot/bootstrap.go deleted file mode 100644 index 614d500d6..000000000 --- a/internal/pkg/esboot/bootstrap.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package esboot - -import ( - "context" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - - "github.com/elastic/go-elasticsearch/v8" -) - -// Temporary ES indices bootstrapping until we move this logic to a proper place -// The plans at the moment possibly handle at ES plugin - -type indexConfig struct { - mapping string - datastream bool -} - -var indexConfigs = map[string]indexConfig{ - ".fleet-actions": {mapping: es.MappingAction}, - ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, - ".fleet-agents": {mapping: es.MappingAgent}, - ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, - ".fleet-policies": {mapping: es.MappingPolicy}, - ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, - ".fleet-servers": {mapping: es.MappingServer}, -} - -// Bootstrap creates .fleet-actions data stream -func EnsureESIndices(ctx context.Context, cli *elasticsearch.Client) error { - for name, idxcfg := range indexConfigs { - err := EnsureDatastream(ctx, cli, name, idxcfg) - if err != nil { - return err - } - } - return nil -} - -func EnsureDatastream(ctx context.Context, cli *elasticsearch.Client, name string, idxcfg indexConfig) error { - if idxcfg.datastream { - err := EnsureILMPolicy(ctx, cli, name) - if err != nil { - return err - } - } - - err := EnsureTemplate(ctx, cli, name, idxcfg.mapping, idxcfg.datastream) - if err != nil { - return err - } - - if idxcfg.datastream { - err = CreateDatastream(ctx, cli, name) - } else { - err = CreateIndex(ctx, cli, name) - } - if err != nil { - return err - } - - return nil -} - -func EnsureIndex(ctx context.Context, cli *elasticsearch.Client, name, mapping string) error { - err := EnsureTemplate(ctx, cli, name, mapping, false) - if err != nil { - return err - } - return CreateIndex(ctx, cli, name) -} diff --git a/internal/pkg/limit/limiter.go 
b/internal/pkg/limit/limiter.go new file mode 100644 index 000000000..09a82b417 --- /dev/null +++ b/internal/pkg/limit/limiter.go @@ -0,0 +1,72 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package limit + +import ( + "errors" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/config" + + "golang.org/x/sync/semaphore" + "golang.org/x/time/rate" +) + +type Limiter struct { + rateLimit *rate.Limiter + maxLimit *semaphore.Weighted +} + +type ReleaseFunc func() + +var ( + ErrRateLimit = errors.New("rate limit") + ErrMaxLimit = errors.New("max limit") +) + +func NewLimiter(cfg *config.Limit) *Limiter { + + if cfg == nil { + return &Limiter{} + } + + l := &Limiter{} + + if cfg.Interval != time.Duration(0) { + l.rateLimit = rate.NewLimiter(rate.Every(cfg.Interval), cfg.Burst) + } + + if cfg.Max != 0 { + l.maxLimit = semaphore.NewWeighted(cfg.Max) + } + + return l +} + +func (l *Limiter) Acquire() (ReleaseFunc, error) { + releaseFunc := noop + + if l.rateLimit != nil && !l.rateLimit.Allow() { + return nil, ErrRateLimit + } + + if l.maxLimit != nil { + if !l.maxLimit.TryAcquire(1) { + return nil, ErrMaxLimit + } + releaseFunc = l.release + } + + return releaseFunc, nil +} + +func (l *Limiter) release() { + if l.maxLimit != nil { + l.maxLimit.Release(1) + } +} + +func noop() { +} diff --git a/internal/pkg/limit/listener.go b/internal/pkg/limit/listener.go new file mode 100644 index 000000000..bd5fe987f --- /dev/null +++ b/internal/pkg/limit/listener.go @@ -0,0 +1,94 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package limit + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/rs/zerolog/log" + "net" + "sync" +) + +// Derived from netutil.LimitListener but works slightly differently. +// Instead of blocking on the semaphore before acception connection, +// this implementation immediately accepts connections and if cannot +// acquire the semaphore it forces the connection closed. +// Ideally, this limiter is run *before* the TLS handshake occurs +// to prevent DDOS attack that eats all the server's CPU. +// The downside to this is that it will Close() valid connections +// indiscriminately. + +func Listener(l net.Listener, n int) net.Listener { + return &limitListener{ + Listener: l, + sem: make(chan struct{}, n), + done: make(chan struct{}), + } +} + +type limitListener struct { + net.Listener + sem chan struct{} + closeOnce sync.Once // ensures the done chan is only closed once + done chan struct{} // no values sent; closed when Close is called +} + +func (l *limitListener) acquire() bool { + select { + case <-l.done: + return false + case l.sem <- struct{}{}: + return true + default: + return false + } +} +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + + // Accept the connection irregardless + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + + // If we cannot acquire the semaphore, close the connection + if acquired := l.acquire(); !acquired { + zlog := log.Warn() + + var err error + if c != nil { + err = c.Close() + zlog.Str(logger.EcsServerAddress, c.LocalAddr().String()) + zlog.Str(logger.EcsClientAddress, c.RemoteAddr().String()) + zlog.Err(err) + } + zlog.Int("max", cap(l.sem)).Msg("Connection closed due to max limit") + + return c, nil + } + + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +func (l *limitListener) Close() error { + err := l.Listener.Close() + l.closeOnce.Do(func() { close(l.done) }) + return err +} + +type 
limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go new file mode 100644 index 000000000..b25097b13 --- /dev/null +++ b/internal/pkg/logger/ecs.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package logger + +const ( + + // Basic logging + EcsLogLevel = "log.level" + EcsLogName = "log.logger" + EcsLogCaller = "log.origin" + EcsLogStackTrace = "log.origin.stack_trace" + EcsMessage = "message" + EcsTimestamp = "@timestamp" + EcsErrorMessage = "error.message" + + // HTTP + EcsHttpVersion = "http.version" + EcsHttpRequestId = "http.request.id" + EcsHttpRequestMethod = "http.request.method" + EcsHttpRequestBodyBytes = "http.request.body.bytes" + EcsHttpResponseCode = "http.response.status_code" + EcsHttpResponseBodyBytes = "http.response.body.bytes" + + // URL + EcsUrlFull = "url.full" + EcsUrlDomain = "url.domain" + EcsUrlPort = "url.port" + + // Client + EcsClientAddress = "client.address" + EcsClientIp = "client.ip" + EcsClientPort = "client.port" + + // Server + EcsServerAddress = "server.address" + + // TLS + EcsTlsEstablished = "tls.established" + EcsTlsResumed = "tls.resumed" + EcsTlsVersion = "tls.version" + EcsTlsClientServerName = "tls.client.server_name" + EcsTlsCipher = "tls.cipher" + EcsTlsClientIssuer = "tls.client.issuer" + EcsTlsClientSubject = "tls.client.subject" + EcsTlsClientNotBefore = "tls.client.not_before" + EcsTlsClientNotAfter = "tls.client.not_after" + EcsTlsClientSerialNumber = "tls.client.x509.serial_number" + EcsTlsClientTimeFormat = "2006-01-02T15:04:05.999Z" + + // Event + EcsEventDuration = "event.duration" + + // 
Service + EcsServiceName = "service.name" +) + +// Non ECS compliant contants used in logging + +const ( + ApiKeyId = "fleet.apikey.id" + PolicyId = "fleet.policy.id" + AgentId = "fleet.agent.id" + EnrollApiKeyId = "fleet.enroll.apikey.id" + AccessApiKeyId = "fleet.access.apikey.id" + DefaultOutputApiKeyId = "fleet.default.apikey.id" +) diff --git a/internal/pkg/logger/http.go b/internal/pkg/logger/http.go new file mode 100644 index 000000000..07fbbfb12 --- /dev/null +++ b/internal/pkg/logger/http.go @@ -0,0 +1,239 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package logger + +import ( + "crypto/tls" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync/atomic" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" + "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" +) + +const ( + HeaderRequestID = "X-Request-ID" + httpSlashPrefix = "HTTP/" +) + +type ReaderCounter struct { + io.ReadCloser + count uint64 +} + +func NewReaderCounter(r io.ReadCloser) *ReaderCounter { + return &ReaderCounter{ + ReadCloser: r, + } +} + +func (rd *ReaderCounter) Read(buf []byte) (int, error) { + n, err := rd.ReadCloser.Read(buf) + atomic.AddUint64(&rd.count, uint64(n)) + return n, err +} + +func (rd *ReaderCounter) Count() uint64 { + return atomic.LoadUint64(&rd.count) +} + +type ResponseCounter struct { + http.ResponseWriter + count uint64 + statusCode int +} + +func NewResponseCounter(w http.ResponseWriter) *ResponseCounter { + return &ResponseCounter{ + ResponseWriter: w, + } +} + +func (rc *ResponseCounter) Write(buf []byte) (int, error) { + if rc.statusCode == 0 { + rc.WriteHeader(http.StatusOK) + } + + n, err := rc.ResponseWriter.Write(buf) + atomic.AddUint64(&rc.count, uint64(n)) + return n, err +} + 
+func (rc *ResponseCounter) WriteHeader(statusCode int) { + rc.ResponseWriter.WriteHeader(statusCode) + + // Defend unsupported multiple calls to WriteHeader + if rc.statusCode == 0 { + rc.statusCode = statusCode + } +} + +func (rc *ResponseCounter) Count() uint64 { + return atomic.LoadUint64(&rc.count) +} + +func splitAddr(addr string) (host string, port int) { + + host, portS, err := net.SplitHostPort(addr) + + if err == nil { + if v, err := strconv.Atoi(portS); err == nil { + port = v + } + } + + return +} + +// Expects HTTP version in form of HTTP/x.y +func stripHTTP(h string) string { + + switch h { + case "HTTP/2.0": + return "2.0" + case "HTTP/1.1": + return "1.1" + default: + if strings.HasPrefix(h, httpSlashPrefix) { + return h[len(httpSlashPrefix):] + } + } + + return h +} + +func httpMeta(r *http.Request, e *zerolog.Event) { + // Look for request id + if reqID := r.Header.Get(HeaderRequestID); reqID != "" { + e.Str(EcsHttpRequestId, reqID) + } + + oldForce := r.URL.ForceQuery + r.URL.ForceQuery = false + e.Str(EcsUrlFull, r.URL.String()) + r.URL.ForceQuery = oldForce + + if domain := r.URL.Hostname(); domain != "" { + e.Str(EcsUrlDomain, domain) + } + + port := r.URL.Port() + if port != "" { + if v, err := strconv.Atoi(port); err == nil { + e.Int(EcsUrlPort, v) + } + } + + // HTTP info + e.Str(EcsHttpVersion, stripHTTP(r.Proto)) + e.Str(EcsHttpRequestMethod, r.Method) + + // ApiKey + if apiKey, err := apikey.ExtractAPIKey(r); err == nil { + e.Str(ApiKeyId, apiKey.Id) + } + + // Client info + if r.RemoteAddr != "" { + e.Str(EcsClientAddress, r.RemoteAddr) + } + + // TLS info + e.Bool(EcsTlsEstablished, r.TLS != nil) +} + +func httpDebug(r *http.Request, e *zerolog.Event) { + // Client info + if r.RemoteAddr != "" { + remoteIP, remotePort := splitAddr(r.RemoteAddr) + e.Str(EcsClientIp, remoteIP) + e.Int(EcsClientPort, remotePort) + } + + if r.TLS != nil { + + e.Str(EcsTlsVersion, TlsVersionToString(r.TLS.Version)) + e.Str(EcsTlsCipher, 
tls.CipherSuiteName(r.TLS.CipherSuite)) + e.Bool(EcsTlsResumed, r.TLS.DidResume) + + if r.TLS.ServerName != "" { + e.Str(EcsTlsClientServerName, r.TLS.ServerName) + } + + if len(r.TLS.PeerCertificates) > 0 && r.TLS.PeerCertificates[0] != nil { + leaf := r.TLS.PeerCertificates[0] + if leaf.SerialNumber != nil { + e.Str(EcsTlsClientSerialNumber, leaf.SerialNumber.String()) + } + e.Str(EcsTlsClientIssuer, leaf.Issuer.String()) + e.Str(EcsTlsClientSubject, leaf.Subject.String()) + e.Str(EcsTlsClientNotBefore, leaf.NotBefore.UTC().Format(EcsTlsClientTimeFormat)) + e.Str(EcsTlsClientNotAfter, leaf.NotAfter.UTC().Format(EcsTlsClientTimeFormat)) + } + } +} + +// ECS HTTP log wrapper +func HttpHandler(next httprouter.Handle) httprouter.Handle { + + return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) { + e := log.Info() + + if !e.Enabled() { + next(w, r, p) + return + } + + start := time.Now() + + rdCounter := NewReaderCounter(r.Body) + r.Body = rdCounter + + wrCounter := NewResponseCounter(w) + + if log.Debug().Enabled() { + d := log.Debug() + httpMeta(r, d) + httpDebug(r, d) + d.Msg("HTTP start") + } + + next(wrCounter, r, p) + + httpMeta(r, e) + + // Data on response + e.Uint64(EcsHttpRequestBodyBytes, rdCounter.Count()) + e.Uint64(EcsHttpResponseBodyBytes, wrCounter.Count()) + e.Int(EcsHttpResponseCode, wrCounter.statusCode) + e.Int64(EcsEventDuration, time.Since(start).Nanoseconds()) + + e.Msg("HTTP done") + } +} + +func TlsVersionToString(vers uint16) string { + switch vers { + case tls.VersionTLS10: + return "1.0" + case tls.VersionTLS11: + return "1.1" + case tls.VersionTLS12: + return "1.2" + case tls.VersionTLS13: + return "1.3" + default: + } + + return fmt.Sprintf("unknown_0x%x", vers) +} diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index a84f79716..ec73c9c2b 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -6,81 +6,175 @@ package logger import ( "context" - 
"github.com/elastic/fleet-server/v7/internal/pkg/reload" + "io" + "io/ioutil" "os" - "strings" + "path/filepath" "sync" "time" + "github.com/elastic/beats/v7/libbeat/common/file" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/config" ) -const ( - kPrettyTimeFormat = "15:04:05.000000" -) - var once sync.Once -var gLogger *logger - -func strToLevel(s string) zerolog.Level { - l := zerolog.DebugLevel - - s = strings.ToLower(s) - switch strings.TrimSpace(s) { - case "trace": - l = zerolog.TraceLevel - case "debug": - l = zerolog.DebugLevel - case "info": - l = zerolog.InfoLevel - case "warn": - l = zerolog.WarnLevel - case "error": - l = zerolog.ErrorLevel - case "fatal": - l = zerolog.FatalLevel - case "panic": - l = zerolog.PanicLevel - } +var gLogger *Logger - return l +// WriterSync implements a Sync function. +type WriterSync interface { + // Sync syncs the logger to its output. + Sync() error } -type logger struct { - cfg *config.Config +// Logger for the Fleet Server. +type Logger struct { + cfg *config.Config + sync WriterSync + name string } // Reload reloads the logger configuration. -func (l *logger) Reload(_ context.Context, cfg *config.Config) error { - if l.cfg.Fleet.Agent.Logging != cfg.Fleet.Agent.Logging { - // reload the logger to new config level - log.Logger = log.Output(os.Stdout).Level(cfg.Fleet.Agent.Logging.LogLevel()) +func (l *Logger) Reload(_ context.Context, cfg *config.Config) error { + if changed(l.cfg, cfg) { + // sync before reload + l.Sync() + + // reload the logger + logger, w, err := configure(cfg, l.name) + if err != nil { + return err + } + log.Logger = logger + l.sync = w } l.cfg = cfg return nil } +// Sync syncs the logger to its output. +func (l *Logger) Sync() { + if l.sync != nil { + l.sync.Sync() + } +} + // Init initializes the logger. 
-func Init(cfg *config.Config) reload.Reloadable { +func Init(cfg *config.Config, svcName string) (*Logger, error) { + var err error once.Do(func() { - gLogger = &logger{ - cfg: cfg, + + var l zerolog.Logger + var w WriterSync + l, w, err = configure(cfg, svcName) + if err != nil { + return } - zerolog.TimeFieldFormat = time.StampMicro + log.Logger = l + gLogger = &Logger{ + cfg: cfg, + sync: w, + name: svcName, + } - log.Logger = log.Output(os.Stdout).Level(cfg.Fleet.Agent.Logging.LogLevel()) - log.Info(). - Int("pid", os.Getpid()). - Int("ppid", os.Getppid()). - Str("exe", os.Args[0]). - Strs("args", os.Args[1:]). - Msg("boot") + // override the field names for ECS + zerolog.LevelFieldName = EcsLogLevel + zerolog.ErrorFieldName = EcsErrorMessage + zerolog.MessageFieldName = EcsMessage + zerolog.TimeFieldFormat = "2006-01-02T15:04:05.999Z" // RFC3339 at millisecond resolution in zulu timezone + zerolog.TimestampFieldName = EcsTimestamp - log.Debug().Strs("env", os.Environ()).Msg("environment") + if !cfg.Logging.Pretty || !cfg.Logging.ToStderr { + zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } + } }) - return gLogger + return gLogger, err +} + +func changed(a *config.Config, b *config.Config) bool { + if a.Fleet.Agent.Logging != b.Fleet.Agent.Logging { + return true + } + al := a.Logging + aFiles := al.Files + al.Files = nil + bl := b.Logging + bFiles := bl.Files + bl.Files = nil + if al != bl { + return true + } + if (aFiles == nil && bFiles != nil) || (aFiles != nil && bFiles == nil) || (*aFiles != *bFiles) { + return true + } + return false +} + +func level(cfg *config.Config) zerolog.Level { + if cfg.Fleet.Agent.Logging.Level != "" { + return cfg.Fleet.Agent.Logging.LogLevel() + } + return cfg.Logging.LogLevel() +} + +func configureStderrLogger(cfg *config.Config) (zerolog.Logger, WriterSync) { + + out := io.Writer(os.Stderr) + if cfg.Logging.Pretty { + out = zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05.000"} + } + + return 
log.Output(out).Level(level(cfg)), os.Stderr +} + +func configureFileRotatorLogger(cfg *config.Config) (zerolog.Logger, WriterSync, error) { + + files := cfg.Logging.Files + if files == nil { + files = &config.LoggingFiles{} + files.InitDefaults() + } + filename := filepath.Join(files.Path, files.Name) + rotator, err := file.NewFileRotator(filename, + file.MaxSizeBytes(files.MaxSize), + file.MaxBackups(files.MaxBackups), + file.Permissions(os.FileMode(files.Permissions)), + file.Interval(files.Interval), + file.RotateOnStartup(files.RotateOnStartup), + file.RedirectStderr(files.RedirectStderr), + ) + if err != nil { + return zerolog.Logger{}, nil, err + } + return log.Output(rotator).Level(level(cfg)), rotator, nil +} + +func configure(cfg *config.Config, svcName string) (lg zerolog.Logger, wr WriterSync, err error) { + + switch { + case cfg.Logging.ToStderr: + lg, wr = configureStderrLogger(cfg) + case cfg.Logging.ToFiles: + lg, wr, err = configureFileRotatorLogger(cfg) + default: + lg = log.Output(ioutil.Discard).Level(level(cfg)) + wr = &nopSync{} + } + + if svcName != "" { + lg = lg.With().Str(EcsServiceName, svcName).Logger() + } + + return +} + +type nopSync struct { +} + +// Sync does nothing. +func (*nopSync) Sync() error { + return nil } diff --git a/internal/pkg/logger/zapStub.go b/internal/pkg/logger/zapStub.go new file mode 100644 index 000000000..4a3d3c1bd --- /dev/null +++ b/internal/pkg/logger/zapStub.go @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package logger + +import ( + "encoding/json" + + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func encoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + MessageKey: EcsMessage, + LevelKey: EcsLogLevel, + NameKey: EcsLogName, + TimeKey: EcsTimestamp, + CallerKey: EcsLogCaller, + StacktraceKey: EcsLogStackTrace, + LineEnding: "\n", + EncodeTime: zapcore.EpochTimeEncoder, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeDuration: zapcore.NanosDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +type zapStub struct { +} + +func (z zapStub) Enabled(zapLevel zapcore.Level) bool { + + zeroLevel := log.Logger.GetLevel() + + switch zapLevel { + case zapcore.DebugLevel: + return zeroLevel == zerolog.DebugLevel + case zapcore.InfoLevel: + return zeroLevel <= zerolog.InfoLevel + case zapcore.WarnLevel: + return zeroLevel <= zerolog.WarnLevel + case zapcore.ErrorLevel: + return zeroLevel <= zerolog.ErrorLevel + case zapcore.FatalLevel: + return zeroLevel <= zerolog.FatalLevel + case zapcore.DPanicLevel, zapcore.PanicLevel: + return zeroLevel <= zerolog.PanicLevel + } + + return true +} + +func (z zapStub) Sync() error { + return nil +} + +func (z zapStub) Write(p []byte) (n int, err error) { + + // Unwrap the zap object for logging + m := make(map[string]interface{}) + if err := json.Unmarshal(p, &m); err != nil { + return 0, err + } + + ctx := log.Log() + for key, val := range m { + + // Don't dupe the timestamp, use the fleet formatted timestamp. 
+ if key != EcsTimestamp { + ctx.Interface(key, val) + } + } + + ctx.Send() + return 0, nil +} + +func NewZapStub(selector string) *logp.Logger { + + wrapF := func(zapcore.Core) zapcore.Core { + enc := zapcore.NewJSONEncoder(encoderConfig()) + stub := zapStub{} + return zapcore.NewCore(enc, stub, stub) + } + + return logp.NewLogger(selector, zap.WrapCore(wrapF)) +} diff --git a/internal/pkg/migrate/migrate.go b/internal/pkg/migrate/migrate.go deleted file mode 100644 index dbcc8cb83..000000000 --- a/internal/pkg/migrate/migrate.go +++ /dev/null @@ -1,108 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package migrate - -import ( - "context" - "encoding/json" - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/model" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" -) - -type enrollmentApiKey struct { - Name string `json:"name"` - Type string `json:"type"` - ApiKey string `json:"api_key" saved:"encrypt"` - ApiKeyId string `json:"api_key_id"` - PolicyId string `json:"policy_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ExpireAt string `json:"expire_at"` - Active bool `json:"active"` -} - -// Data migration -// This is for development only (1 instance of fleet) -// Not safe for multiple instances of fleet -// Initially needed to migrate the enrollment-api-keys that kibana creates -func Migrate(ctx context.Context, sv saved.CRUD, bulker bulk.Bulk) error { - return MigrateEnrollmentAPIKeys(ctx, sv, bulker) -} - -func MigrateEnrollmentAPIKeys(ctx context.Context, sv saved.CRUD, bulker bulk.Bulk) error { - - // Query all enrollment keys from the new schema - raw, err := 
dl.RenderAllEnrollmentAPIKeysQuery(1000) - if err != nil { - return err - } - - var recs []model.EnrollmentApiKey - res, err := bulker.Search(ctx, []string{dl.FleetEnrollmentAPIKeys}, raw, bulk.WithRefresh()) - if err != nil { - return err - } - - for _, hit := range res.Hits { - var rec model.EnrollmentApiKey - err := json.Unmarshal(hit.Source, &rec) - if err != nil { - return err - } - recs = append(recs, rec) - } - - // Query enrollment keys from kibana saved objects - query := saved.NewQuery("fleet-enrollment-api-keys") - - hits, err := sv.FindByNode(ctx, query) - if err != nil { - return err - } - - for _, hit := range hits { - var rec enrollmentApiKey - if err := sv.Decode(hit, &rec); err != nil { - return err - } - if _, ok := findExistingEnrollmentAPIKey(recs, rec); !ok { - newRec := translateEnrollmentAPIKey(rec) - b, err := json.Marshal(newRec) - if err != nil { - return err - } - _, err = bulker.Create(ctx, dl.FleetEnrollmentAPIKeys, "", b, bulk.WithRefresh()) - if err != nil { - return err - } - } - } - - return nil -} - -func findExistingEnrollmentAPIKey(hay []model.EnrollmentApiKey, needle enrollmentApiKey) (*model.EnrollmentApiKey, bool) { - for _, rec := range hay { - if rec.ApiKeyId == needle.ApiKeyId { - return &rec, true - } - } - return nil, false -} - -func translateEnrollmentAPIKey(src enrollmentApiKey) model.EnrollmentApiKey { - return model.EnrollmentApiKey{ - Active: src.Active, - ApiKey: src.ApiKey, - ApiKeyId: src.ApiKeyId, - CreatedAt: src.CreatedAt, - ExpireAt: src.ExpireAt, - Name: src.Name, - PolicyId: src.PolicyId, - UpdatedAt: src.UpdatedAt, - } -} diff --git a/internal/pkg/model/ext.go b/internal/pkg/model/ext.go index 4d6a4bf84..d89787855 100644 --- a/internal/pkg/model/ext.go +++ b/internal/pkg/model/ext.go @@ -25,3 +25,16 @@ func (m *Server) Time() (time.Time, error) { func (m *Server) SetTime(t time.Time) { m.Timestamp = t.Format(time.RFC3339Nano) } + +// CheckDifferentVersion returns Agent version if it is different from ver, 
otherwise return empty string +func (m *Agent) CheckDifferentVersion(ver string) string { + if m == nil { + return "" + } + + if m.Agent == nil || ver != m.Agent.Version { + return ver + } + + return "" +} diff --git a/internal/pkg/model/ext_test.go b/internal/pkg/model/ext_test.go new file mode 100644 index 000000000..e48194b30 --- /dev/null +++ b/internal/pkg/model/ext_test.go @@ -0,0 +1,87 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !integration +// +build !integration + +package model + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestAgentGetNewVersion(t *testing.T) { + tests := []struct { + Name string + Agent *Agent + Ver string + WantVer string + }{ + { + Name: "nil", + }, + { + Name: "agent no meta empty version", + Agent: &Agent{}, + }, + { + Name: "agent no meta nonempty version", + Agent: &Agent{}, + Ver: "7.14", + WantVer: "7.14", + }, + { + Name: "agent with meta empty new version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "", + WantVer: "", + }, + { + Name: "agent with meta empty version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "", + }, + }, + Ver: "7.15", + WantVer: "7.15", + }, + { + Name: "agent with meta non empty version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "7.14", + WantVer: "", + }, + { + Name: "agent with meta new version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "7.15", + WantVer: "7.15", + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + newVer := tc.Agent.CheckDifferentVersion(tc.Ver) + diff := cmp.Diff(tc.WantVer, newVer) + if diff != "" { + t.Error(diff) + } + }) + } +} diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 
8f66bee5e..c1cf8160a 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -45,32 +45,58 @@ type Action struct { // The action expiration date/time Expiration string `json:"expiration,omitempty"` - // The input identifier the actions should be routed to. - InputId string `json:"input_id,omitempty"` + // The input type the actions should be routed to. + InputType string `json:"input_type,omitempty"` + + // The optional action timeout in seconds + Timeout int64 `json:"timeout,omitempty"` // Date/time the action was created Timestamp string `json:"@timestamp,omitempty"` - // The action type. APP_ACTION is the value for the actions that suppose to be routed to the endpoints/beats. + // The action type. INPUT_ACTION is the value for the actions that suppose to be routed to the endpoints/beats. Type string `json:"type,omitempty"` + + // The ID of the user who created the action. + UserId string `json:"user_id,omitempty"` +} + +// ActionData The opaque payload. +type ActionData struct { +} + +// ActionResponse The custom action response payload. +type ActionResponse struct { } // ActionResult An Elastic Agent action results type ActionResult struct { ESDocument + // The opaque payload. + ActionData json.RawMessage `json:"action_data,omitempty"` + // The action id. ActionId string `json:"action_id,omitempty"` + // The custom action response payload. + ActionResponse json.RawMessage `json:"action_response,omitempty"` + // The agent id. AgentId string `json:"agent_id,omitempty"` + // Date/time the action was completed + CompletedAt string `json:"completed_at,omitempty"` + // The opaque payload. Data json.RawMessage `json:"data,omitempty"` // The action error message. 
Error string `json:"error,omitempty"` + // Date/time the action was started + StartedAt string `json:"started_at,omitempty"` + // Date/time the action was created Timestamp string `json:"@timestamp,omitempty"` } @@ -83,7 +109,7 @@ type Agent struct { AccessApiKeyId string `json:"access_api_key_id,omitempty"` // The last acknowledged action sequence number for the Elastic Agent - ActionSeqNo int64 `json:"action_seq_no,omitempty"` + ActionSeqNo []int64 `json:"action_seq_no,omitempty"` // Active flag Active bool `json:"active"` @@ -119,6 +145,9 @@ type Agent struct { // The policy ID for the Elastic Agent PolicyId string `json:"policy_id,omitempty"` + // The policy output permissions hash + PolicyOutputPermissionsHash string `json:"policy_output_permissions_hash,omitempty"` + // The current policy revision_idx for the Elastic Agent PolicyRevisionIdx int64 `json:"policy_revision_idx,omitempty"` @@ -131,6 +160,9 @@ type Agent struct { // Date/time the Elastic Agent unenrolled UnenrolledAt string `json:"unenrolled_at,omitempty"` + // Reason the Elastic Agent was unenrolled + UnenrolledReason string `json:"unenrolled_reason,omitempty"` + // Date/time the Elastic Agent unenrolled started UnenrollmentStartedAt string `json:"unenrollment_started_at,omitempty"` @@ -157,6 +189,45 @@ type AgentMetadata struct { Version string `json:"version"` } +// Artifact An artifact served by Fleet +type Artifact struct { + ESDocument + + // Encoded artifact data + Body json.RawMessage `json:"body"` + + // Name of compression algorithm applied to artifact + CompressionAlgorithm string `json:"compression_algorithm,omitempty"` + + // Timestamp artifact was created + Created string `json:"created"` + + // SHA256 of artifact before encoding has been applied + DecodedSha256 string `json:"decoded_sha256,omitempty"` + + // Size of artifact before encoding has been applied + DecodedSize int64 `json:"decoded_size,omitempty"` + + // SHA256 of artifact after encoding has been applied + EncodedSha256 
string `json:"encoded_sha256,omitempty"` + + // Size of artifact after encoding has been applied + EncodedSize int64 `json:"encoded_size,omitempty"` + + // Name of encryption algorithm applied to artifact + EncryptionAlgorithm string `json:"encryption_algorithm,omitempty"` + + // Human readable artifact identifier + Identifier string `json:"identifier"` + + // Name of the package that owns this artifact + PackageName string `json:"package_name,omitempty"` +} + +// Body Encoded artifact data +type Body struct { +} + // Data The opaque payload. type Data struct { } @@ -223,6 +294,9 @@ type Policy struct { // Date/time the policy revision was created Timestamp string `json:"@timestamp,omitempty"` + + // Timeout (seconds) that an Elastic Agent should be un-enrolled. + UnenrollTimeout int64 `json:"unenroll_timeout,omitempty"` } // PolicyLeader The current leader Fleet Server for a policy diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go index 31eb73ea9..5d5e7c8fe 100644 --- a/internal/pkg/monitor/global_checkpoint.go +++ b/internal/pkg/monitor/global_checkpoint.go @@ -7,55 +7,91 @@ package monitor import ( "context" "encoding/json" - "github.com/elastic/fleet-server/v7/internal/pkg/es" + "errors" + "net/http" + "time" - "github.com/elastic/go-elasticsearch/v8" + esh "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) -type shard struct { - SeqNo struct { - GlobalCheckpoint int64 `json:"global_checkpoint"` - } `json:"seq_no"` -} +var ErrGlobalCheckpoint = errors.New("global checkpoint error") + +// Global checkpoint response +// {"global_checkpoints":[-1]} -type indexStats struct { - Shards map[string][]shard `json:"shards"` +type globalCheckpointsResponse struct { + GlobalCheckpoints []int64 `json:"global_checkpoints"` + TimedOut bool `json:"timed_out"` 
+ Error esh.ErrorT `json:"error,omitempty"` } -type statsResponse struct { - IndexStats map[string]indexStats `json:"indices"` +func queryGlobalCheckpoint(ctx context.Context, es *elasticsearch.Client, index string) (seqno sqn.SeqNo, err error) { + req := esh.NewGlobalCheckpointsRequest(es.Transport) + res, err := req(req.WithContext(ctx), + req.WithIndex(index)) - Error es.ErrorT `json:"error,omitempty"` -} + if err != nil { + return + } -func queryGlobalCheckpoint(ctx context.Context, es *elasticsearch.Client, index string) (seqno int64, err error) { - seqno = defaultSeqNo + seqno, err = processGlobalCheckpointResponse(res) + if errors.Is(err, esh.ErrIndexNotFound) { + seqno = sqn.DefaultSeqNo + err = nil + } + + return seqno, err +} - res, err := es.Indices.Stats( - es.Indices.Stats.WithContext(ctx), - es.Indices.Stats.WithIndex(index), - es.Indices.Stats.WithLevel("shards"), +func waitCheckpointAdvance(ctx context.Context, es *elasticsearch.Client, index string, checkpoint sqn.SeqNo, to time.Duration) (seqno sqn.SeqNo, err error) { + req := esh.NewGlobalCheckpointsRequest(es.Transport) + res, err := req(req.WithContext(ctx), + req.WithIndex(index), + req.WithCheckpoints(checkpoint), + req.WithWaitForAdvance(true), + req.WithWaitForIndex(true), + req.WithTimeout(to), ) if err != nil { return } + return processGlobalCheckpointResponse(res) +} + +func processGlobalCheckpointResponse(res *esapi.Response) (seqno sqn.SeqNo, err error) { defer res.Body.Close() - var sres statsResponse + // Don't parse the payload if timeout + if res.StatusCode == http.StatusGatewayTimeout { + return seqno, esh.ErrTimeout + } + + // Parse payload + var sres globalCheckpointsResponse err = json.NewDecoder(res.Body).Decode(&sres) if err != nil { return } - if stats, ok := sres.IndexStats[index]; ok { - if shards, ok := stats.Shards["0"]; ok { - if len(shards) > 0 { - seqno = shards[0].SeqNo.GlobalCheckpoint - } - } + // Check error + err = esh.TranslateError(res.StatusCode, &sres.Error) 
+ if err != nil { + return nil, err + } + + if sres.TimedOut { + return nil, esh.ErrTimeout + } + + if len(sres.GlobalCheckpoints) == 0 { + return nil, esh.ErrNotFound } - return + return sres.GlobalCheckpoints, nil } diff --git a/internal/pkg/monitor/mock/monitor.go b/internal/pkg/monitor/mock/monitor.go index 47368dcf7..25b268f07 100644 --- a/internal/pkg/monitor/mock/monitor.go +++ b/internal/pkg/monitor/mock/monitor.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" ) var gMockIndexCounter uint64 @@ -26,7 +27,7 @@ func (s *mockSubT) Output() <-chan []es.HitT { } type MockIndexMonitor struct { - checkpoint int64 + checkpoint sqn.SeqNo mut sync.RWMutex subs map[uint64]*mockSubT @@ -35,13 +36,13 @@ type MockIndexMonitor struct { // NewMockIndexMonitor returns a mock monitor. func NewMockIndexMonitor() *MockIndexMonitor { return &MockIndexMonitor{ - checkpoint: -1, + checkpoint: sqn.DefaultSeqNo, subs: make(map[uint64]*mockSubT), } } // GetCheckpoint returns the current checkpoint. 
-func (m *MockIndexMonitor) GetCheckpoint() int64 { +func (m *MockIndexMonitor) GetCheckpoint() sqn.SeqNo { return m.checkpoint } @@ -85,7 +86,7 @@ func (m *MockIndexMonitor) Notify(ctx context.Context, hits []es.HitT) { sz := len(hits) if sz > 0 { maxVal := hits[sz-1].SeqNo - m.checkpoint = maxVal + m.checkpoint = []int64{maxVal} m.mut.RLock() var wg sync.WaitGroup diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 78df63db9..361b5ba40 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -8,22 +8,40 @@ import ( "bytes" "context" "encoding/json" - "sync/atomic" + "errors" + "sync" "time" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sleep" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) const ( - defaultCheckInterval = 1 // check every second for the new action - defaultSeqNo = int64(-1) // the _seq_no in elasticsearch start with 0 + defaultPollTimeout = 4 * time.Minute // default long poll timeout + defaultSeqNo = int64(-1) // the _seq_no in elasticsearch start with 0 defaultWithExpiration = false + + // Making the default fetch size larger, in order to increase the throughput of the monitor. + // This is configurable as well, so can be adjusted based on the memory size of the container if needed. + // Seems like the usage of smaller actions, one or few agents in the action document would be more prevalent in the future. + // For example, as of now the current size of osquery action JSON document for 1000 agents is 40KB. + // Assuming the worst case scenario of 1000 documents fetched, we are looking at 50MB slice. 
+ // One action can be split up into multiple documents up to the 1000 agents per action if needed. + defaultFetchSize = 1000 + + // Retry delay on error waiting on the global checkpoint update. + // This is the wait time between requests to elasticsearch in the following cases: + // 1. Index is not found (index is created only on the first document save) + // 2. Any other error waiting on global checkpoint, except timeouts. + // For the long poll timeout, start a new request as soon as possible. + retryDelay = 3 * time.Second ) const ( @@ -34,25 +52,8 @@ const ( fieldExpiration = "expiration" ) -type HitT struct { - Id string `json:"_id"` - SeqNo int64 `json:"_seq_no"` - Index string `json:"_index"` - Source json.RawMessage `json:"_source"` - Score *float64 `json:"_score"` -} - -type HitsT struct { - Hits []HitT `json:"hits"` - Total struct { - Relation string `json:"relation"` - Value uint64 `json:"value"` - } `json:"total"` - MaxScore *float64 `json:"max_score"` -} - type GlobalCheckpointProvider interface { - GetCheckpoint() int64 + GetCheckpoint() sqn.SeqNo } // SimpleMonitor monitors for new documents in an index @@ -72,15 +73,18 @@ type SimpleMonitor interface { // simpleMonitorT monitors for new documents in an index type simpleMonitorT struct { - cli *elasticsearch.Client + esCli *elasticsearch.Client + monCli *elasticsearch.Client tmplCheck *dsl.Tmpl tmplQuery *dsl.Tmpl index string - checkInterval time.Duration + pollTimeout time.Duration withExpiration bool + fetchSize int - checkpoint int64 // index global checkpoint + checkpoint sqn.SeqNo // index global checkpoint + mx sync.RWMutex // checkpoint mutex log zerolog.Logger @@ -93,13 +97,16 @@ type simpleMonitorT struct { type Option func(SimpleMonitor) // New creates new simple monitor -func NewSimple(index string, cli *elasticsearch.Client, opts ...Option) (SimpleMonitor, error) { +func NewSimple(index string, esCli, monCli *elasticsearch.Client, opts ...Option) (SimpleMonitor, error) { + m := &simpleMonitorT{ 
index: index, - cli: cli, - checkInterval: defaultCheckInterval * time.Second, + esCli: esCli, + monCli: monCli, + pollTimeout: defaultPollTimeout, withExpiration: defaultWithExpiration, - checkpoint: defaultSeqNo, + fetchSize: defaultFetchSize, + checkpoint: sqn.DefaultSeqNo, outCh: make(chan []es.HitT, 1), } @@ -125,9 +132,18 @@ func NewSimple(index string, cli *elasticsearch.Client, opts ...Option) (SimpleM } // WithCheckInterval sets a periodic check interval -func WithCheckInterval(interval time.Duration) Option { +func WithFetchSize(fetchSize int) Option { return func(m SimpleMonitor) { - m.(*simpleMonitorT).checkInterval = interval + if fetchSize > 0 { + m.(*simpleMonitorT).fetchSize = fetchSize + } + } +} + +// WithPollTimeout sets the global checkpoint polling timeout +func WithPollTimeout(to time.Duration) Option { + return func(m SimpleMonitor) { + m.(*simpleMonitorT).pollTimeout = to } } @@ -151,24 +167,31 @@ func (m *simpleMonitorT) Output() <-chan []es.HitT { } // GetCheckpoint implements GlobalCheckpointProvider interface -func (m *simpleMonitorT) GetCheckpoint() int64 { +func (m *simpleMonitorT) GetCheckpoint() sqn.SeqNo { return m.loadCheckpoint() } -func (m *simpleMonitorT) storeCheckpoint(val int64) { - m.log.Debug().Int64("checkpoint", val).Msg("updated checkpoint") - atomic.StoreInt64(&m.checkpoint, val) +func (m *simpleMonitorT) storeCheckpoint(val sqn.SeqNo) { + m.log.Debug().Ints64("checkpoints", val).Msg("updated checkpoint") + m.mx.Lock() + defer m.mx.Unlock() + m.checkpoint = val.Clone() } -func (m *simpleMonitorT) loadCheckpoint() int64 { - return atomic.LoadInt64(&m.checkpoint) +func (m *simpleMonitorT) loadCheckpoint() sqn.SeqNo { + m.mx.RLock() + defer m.mx.RUnlock() + return m.checkpoint.Clone() } // Run runs monitor. 
func (m *simpleMonitorT) Run(ctx context.Context) (err error) { - m.log.Info().Msg("start") + m.log.Info().Msg("Starting index monitor") defer func() { - m.log.Info().Err(err).Msg("exited") + if err == context.Canceled { + err = nil + } + m.log.Info().Err(err).Msg("Index monitor exited") }() defer func() { @@ -178,10 +201,10 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { }() // Initialize global checkpoint from the index stats - var checkpoint int64 - checkpoint, err = queryGlobalCheckpoint(ctx, m.cli, m.index) + var checkpoint sqn.SeqNo + checkpoint, err = queryGlobalCheckpoint(ctx, m.monCli, m.index) if err != nil { - m.log.Error().Err(err).Msg("failed to initialize the global checkpoint") + m.log.Error().Err(err).Msg("failed to initialize the global checkpoints") return err } m.storeCheckpoint(checkpoint) @@ -192,76 +215,105 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { m.readyCh = nil } - // Start timer loop to check for global checkpoint changes - t := time.NewTimer(m.checkInterval) - defer t.Stop() for { - select { - case <-t.C: - hits, err := m.check(ctx) + checkpoint := m.loadCheckpoint() + + // Wait checkpoint advance + newCheckpoint, err := waitCheckpointAdvance(ctx, m.monCli, m.index, checkpoint, m.pollTimeout) + if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + // Wait until created + m.log.Debug().Msgf("index not found, poll again in %v", retryDelay) + } else if errors.Is(err, es.ErrTimeout) { + // Timed out, wait again + m.log.Debug().Msg("timeout on global checkpoints advance, poll again") + continue + } else if errors.Is(err, context.Canceled) { + m.log.Info().Msg("context closed waiting for global checkpoints advance") + } else { + // Log the error and keep trying + m.log.Info().Err(err).Msg("failed on waiting for global checkpoints advance") + } + + // Delay next attempt + err = sleep.WithContext(ctx, retryDelay) + if err != nil { + return err + } + } + + // This is an example of steps for 
fetching the documents without "holes" (not-yet-indexed documents in between) + // as recommended by Elasticsearch team on August 25th, 2021 + // 1. Call Global checkpoints = 5 + // 2. Search = 1, 2, 3, 5. + // 3. Manual refresh + // 4. Search and get 4,5 + // 5. Return to step 1 + + // Fetch up to known checkpoint + count := m.fetchSize + for count == m.fetchSize { + hits, err := m.fetch(ctx, checkpoint, newCheckpoint) if err != nil { m.log.Error().Err(err).Msg("failed checking new documents") - } else { - m.notify(ctx, hits) + break + } + + // Check if the list of hits has holes + if es.HasHoles(checkpoint, hits) { + m.log.Debug().Msg("hits list has holes, refresh index") + err = es.Refresh(ctx, m.esCli, m.index) + if err != nil { + m.log.Error().Err(err).Msg("failed to refresh index") + break + } + + // Refetch + hits, err = m.fetch(ctx, checkpoint, newCheckpoint) + if err != nil { + m.log.Error().Err(err).Msg("failed checking new documents after refresh") + break + } + } + + // Notify call updates checkpoint + count = m.notify(ctx, hits) + + // Get the latest checkpoint for the next fetch iteration + if count == m.fetchSize { + checkpoint = m.loadCheckpoint() } - t.Reset(m.checkInterval) - case <-ctx.Done(): - return ctx.Err() } } } -func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) { +func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { sz := len(hits) if sz > 0 { select { case m.outCh <- hits: maxVal := hits[sz-1].SeqNo - m.storeCheckpoint(maxVal) + m.storeCheckpoint([]int64{maxVal}) + return sz case <-ctx.Done(): } } + return 0 } -func (m *simpleMonitorT) check(ctx context.Context) ([]es.HitT, error) { +func (m *simpleMonitorT) fetch(ctx context.Context, checkpoint, maxCheckpoint sqn.SeqNo) ([]es.HitT, error) { now := time.Now().UTC().Format(time.RFC3339) - checkpoint := m.loadCheckpoint() - // Run check query that detects that there are new documents available params := map[string]interface{}{ - dl.FieldSeqNo: 
checkpoint, + dl.FieldSeqNo: checkpoint.Value(), + dl.FieldMaxSeqNo: maxCheckpoint.Value(), } if m.withExpiration { params[dl.FieldExpiration] = now } - hits, err := m.search(ctx, m.tmplCheck, params) - if err != nil { - return nil, err - } - - if len(hits) == 0 { - return nil, nil - } - - // New documents are detected, fetch global checkpoint - gcp, err := queryGlobalCheckpoint(ctx, m.cli, m.index) - if err != nil { - m.log.Error().Err(err).Msg("failed to check the global checkpoint") - return nil, err - } - - // If global check point is still not greater that the current known checkpoint, return nothing - if gcp <= checkpoint { - return nil, nil - } - - // Fetch documents capped by the global checkpoint - // Reusing params for the documents query - params[dl.FieldMaxSeqNo] = gcp - - hits, err = m.search(ctx, m.tmplQuery, params) + hits, err := m.search(ctx, m.tmplQuery, params) if err != nil { return nil, err } @@ -275,10 +327,10 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ return nil, err } - res, err := m.cli.Search( - m.cli.Search.WithContext(ctx), - m.cli.Search.WithIndex(m.index), - m.cli.Search.WithBody(bytes.NewBuffer(query)), + res, err := m.esCli.Search( + m.esCli.Search.WithContext(ctx), + m.esCli.Search.WithIndex(m.index), + m.esCli.Search.WithBody(bytes.NewBuffer(query)), ) if err != nil { return nil, err @@ -291,7 +343,15 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ } if res.IsError() { - return nil, es.TranslateError(res.StatusCode, esres.Error) + err = es.TranslateError(res.StatusCode, &esres.Error) + } + + if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + m.log.Debug().Msg(es.ErrIndexNotFound.Error()) + return nil, nil + } + return nil, err } return esres.Hits.Hits, nil @@ -313,6 +373,7 @@ func (m *simpleMonitorT) prepareCheckQuery() (tmpl *dsl.Tmpl, err error) { // Prepares full documents query func (m *simpleMonitorT) prepareQuery() (tmpl *dsl.Tmpl, err 
error) { tmpl, root := m.prepareCommon(true) + root.Size(uint64(m.fetchSize)) root.Sort().SortOrder(fieldSeqNo, dsl.SortAscend) if err := tmpl.Resolve(root); err != nil { diff --git a/internal/pkg/monitor/monitor_integration_test.go b/internal/pkg/monitor/monitor_integration_test.go index eb1ebefb2..c9704f409 100644 --- a/internal/pkg/monitor/monitor_integration_test.go +++ b/internal/pkg/monitor/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package monitor @@ -10,7 +11,6 @@ import ( "context" "sync" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -21,8 +21,6 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -const testMonitorIntervalMS = 100 - func setupIndex(ctx context.Context, t *testing.T) (string, bulk.Bulk) { index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingAction) return index, bulker @@ -46,8 +44,7 @@ func TestSimpleMonitorNonEmptyIndex(t *testing.T) { func runSimpleMonitorTest(t *testing.T, ctx context.Context, index string, bulker bulk.Bulk) { readyCh := make(chan error) - mon, err := NewSimple(index, bulker.Client(), - WithCheckInterval(testMonitorIntervalMS*time.Millisecond), + mon, err := NewSimple(index, bulker.Client(), bulker.Client(), WithReadyChan(readyCh), ) require.NoError(t, err) diff --git a/internal/pkg/monitor/subscription_monitor.go b/internal/pkg/monitor/subscription_monitor.go index 06907d5ec..02e43c4d8 100644 --- a/internal/pkg/monitor/subscription_monitor.go +++ b/internal/pkg/monitor/subscription_monitor.go @@ -6,12 +6,14 @@ package monitor import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/es" "sync" "sync/atomic" "time" - "github.com/elastic/go-elasticsearch/v8" + 
"github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" ) @@ -60,8 +62,8 @@ type monitorT struct { } // New creates new subscription monitor -func New(index string, cli *elasticsearch.Client, opts ...Option) (Monitor, error) { - sm, err := NewSimple(index, cli, opts...) +func New(index string, esCli, monCli *elasticsearch.Client, opts ...Option) (Monitor, error) { + sm, err := NewSimple(index, esCli, monCli, opts...) if err != nil { return nil, err } @@ -75,7 +77,7 @@ func New(index string, cli *elasticsearch.Client, opts ...Option) (Monitor, erro return m, nil } -func (m *monitorT) GetCheckpoint() int64 { +func (m *monitorT) GetCheckpoint() sqn.SeqNo { return m.sm.GetCheckpoint() } @@ -143,10 +145,11 @@ func (m *monitorT) notify(ctx context.Context, hits []es.HitT) { select { case s.c <- hits: case <-lc.Done(): - err := ctx.Err() - if err == context.DeadlineExceeded { - log.Err(err).Str("ctx", "subscription monitor").Dur("timeout", m.subTimeout).Msg("dropped notification") - } + log.Error(). + Err(lc.Err()). + Str("ctx", "subscription monitor"). + Dur("timeout", m.subTimeout). + Msg("dropped notification") } }(s) } diff --git a/internal/pkg/monitor/subscription_monitor_integration_test.go b/internal/pkg/monitor/subscription_monitor_integration_test.go index d24a894cf..e3371764d 100644 --- a/internal/pkg/monitor/subscription_monitor_integration_test.go +++ b/internal/pkg/monitor/subscription_monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration + // +build integration package monitor @@ -10,7 +11,6 @@ import ( "context" "sync" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -38,8 +38,7 @@ func TestMonitorNonEmptyIndex(t *testing.T) { func runMonitorTest(t *testing.T, ctx context.Context, index string, bulker bulk.Bulk) { readyCh := make(chan error) - mon, err := New(index, bulker.Client(), - WithCheckInterval(testMonitorIntervalMS*time.Millisecond), + mon, err := New(index, bulker.Client(), bulker.Client(), WithReadyChan(readyCh), ) require.NoError(t, err) diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 2c09836a0..972261340 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -7,26 +7,48 @@ package policy import ( "context" "errors" - "fmt" "sync" - "sync/atomic" "time" - "github.com/gofrs/uuid" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" ) -var gCounter uint64 +const cloudPolicyId = "policy-elastic-agent-on-cloud" + +/* +Design should have the following properties + +Policy rollout scheduling should... +1) be fair; delivered in first come first served order. +2) be throttled to avoid uncontrolled impact on resources, particularly CPU. +3) adapt to subscribers that drop offline. +4) attempt to deliver the latest policy to each subscriber at the time of delivery. +5) prioritize delivery to agents that supervise fleet-servers. + +This implementation addresses the above issues by queuing subscription requests per +policy, and moving requests to the pending queue when the requirement is met; ie. 
+the policy is updateable. + +If the subscription is unsubscribed (ie. the agent drops offline), this implementation +will remove the subscription request from its current location in either the waiting +queue on the policy or the pending queue. + +Ordering is achieved with a simple doubly linked list implementation that allows object +migration across queues, and O(1) unlink without knowledge about which queue the subscription +is in. +*/ type Subscription interface { // Output returns a new policy that needs to be sent based on the current subscription. - Output() <-chan model.Policy + Output() <-chan *ParsedPolicy } type Monitor interface { @@ -42,19 +64,9 @@ type Monitor interface { type policyFetcher func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) -type subT struct { - idx uint64 - - policyId string - revIdx int64 - coordIdx int64 - - c chan model.Policy -} - type policyT struct { - policy model.Policy - subs map[uint64]subT // map sub counter to channel + pp ParsedPolicy + head *subT } type monitorT struct { @@ -65,66 +77,181 @@ type monitorT struct { monitor monitor.Monitor kickCh chan struct{} + deployCh chan struct{} + policies map[string]policyT + pendingQ *subT policyF policyFetcher policiesIndex string throttle time.Duration -} -// Output returns a new policy that needs to be sent based on the current subscription. -func (s *subT) Output() <-chan model.Policy { - return s.c + startCh chan struct{} } // NewMonitor creates the policy monitor for subscribing agents. 
func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duration) Monitor { return &monitorT{ - log: log.With().Str("ctx", "policy agent manager").Logger(), + log: log.With().Str("ctx", "policy agent monitor").Logger(), bulker: bulker, monitor: monitor, kickCh: make(chan struct{}, 1), + deployCh: make(chan struct{}, 1), policies: make(map[string]policyT), + pendingQ: makeHead(), throttle: throttle, policyF: dl.QueryLatestPolicies, policiesIndex: dl.FleetPolicies, + startCh: make(chan struct{}), } } // Run runs the monitor. func (m *monitorT) Run(ctx context.Context) error { + m.log.Info(). + Dur("throttle", m.throttle). + Msg("run policy monitor") + s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) + // If no throttle set, setup a minimal spin rate. + dur := m.throttle + if dur == 0 { + dur = time.Nanosecond + } + + isDeploying := true + ticker := time.NewTicker(dur) + + startDeploy := func() { + if !isDeploying { + isDeploying = true + ticker = time.NewTicker(dur) + } + } + + stopDeploy := func() { + ticker.Stop() + isDeploying = false + } + + // begin in stopped state + stopDeploy() + + // stop timer on exit + defer stopDeploy() + + close(m.startCh) + LOOP: for { select { - case <-ctx.Done(): - break LOOP case <-m.kickCh: - if err := m.process(ctx); err != nil { + if err := m.loadPolicies(ctx); err != nil { return err } + startDeploy() + case <-m.deployCh: + startDeploy() case hits := <-s.Output(): - policies := make([]model.Policy, len(hits)) - for i, hit := range hits { - err := hit.Unmarshal(&policies[i]) - if err != nil { - return err - } - } - if err := m.processPolicies(ctx, policies); err != nil { + if err := m.processHits(ctx, hits); err != nil { return err } + startDeploy() + case <-ticker.C: + if done := m.dispatchPending(); done { + stopDeploy() + } + case <-ctx.Done(): + break LOOP } } return nil } -func (m *monitorT) process(ctx context.Context) error { +func unmarshalHits(hits []es.HitT) ([]model.Policy, error) { + + 
policies := make([]model.Policy, len(hits)) + for i, hit := range hits { + err := hit.Unmarshal(&policies[i]) + if err != nil { + return nil, err + } + } + + return policies, nil +} + +func (m *monitorT) processHits(ctx context.Context, hits []es.HitT) error { + policies, err := unmarshalHits(hits) + if err != nil { + m.log.Error().Err(err).Msg("fail unmarshal hits") + return err + } + + return m.processPolicies(ctx, policies) +} + +func (m *monitorT) waitStart(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + err = ctx.Err() + case <-m.startCh: + } + return +} + +func (m *monitorT) dispatchPending() bool { + m.mut.Lock() + defer m.mut.Unlock() + + s := m.pendingQ.popFront() + if s == nil { + return true + } + + done := m.pendingQ.isEmpty() + + // Lookup the latest policy for this subscription + policy, ok := m.policies[s.policyId] + if !ok { + m.log.Warn(). + Str(logger.PolicyId, s.policyId). + Msg("logic error: policy missing on dispatch") + return done + } + + select { + case s.ch <- &policy.pp: + m.log.Debug(). + Str(logger.AgentId, s.agentId). + Str(logger.PolicyId, s.policyId). + Int64("rev", s.revIdx). + Int64("coord", s.coordIdx). + Msg("dispatch") + default: + // Should never block on a channel; we created a channel of size one. + // A block here indicates a logic error somewhere. + m.log.Error(). + Str(logger.PolicyId, s.policyId). + Str(logger.AgentId, s.agentId). + Msg("logic error: should never block on policy channel") + } + + return done +} + +func (m *monitorT) loadPolicies(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + m.log.Debug(). + Str("index", m.policiesIndex). 
+ Msg(es.ErrIndexNotFound.Error()) + return nil + } return err } if len(policies) == 0 { @@ -136,17 +263,17 @@ func (m *monitorT) process(ctx context.Context) error { func (m *monitorT) processPolicies(ctx context.Context, policies []model.Policy) error { if len(policies) == 0 { - // nothing to do return nil } + latest := m.groupByLatest(policies) for _, policy := range latest { - if err := m.rollout(ctx, policy); err != nil { - if err == context.Canceled { - return err - } - return fmt.Errorf("failed rolling out policy %s: %w", policy.PolicyId, err) + pp, err := NewParsedPolicy(policy) + if err != nil { + return err } + + m.updatePolicy(pp) } return nil } @@ -169,119 +296,98 @@ func (m *monitorT) groupByLatest(policies []model.Policy) map[string]model.Polic return latest } -func (m *monitorT) rollout(ctx context.Context, policy model.Policy) error { - zlog := m.log.With().Str("policyId", policy.PolicyId).Logger() +func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { + newPolicy := pp.Policy - subs := m.updatePolicy(policy) - if subs == nil { - return nil - } - if len(subs) == 0 { - zlog.Info().Msg("no pending subscriptions to revised policy") - return nil + zlog := m.log.With(). + Str(logger.PolicyId, newPolicy.PolicyId). + Int64("rev", newPolicy.RevisionIdx). + Int64("coord", newPolicy.CoordinatorIdx). + Logger() + + if newPolicy.CoordinatorIdx <= 0 { + zlog.Info().Str(logger.PolicyId, newPolicy.PolicyId).Msg("Ignore policy that has not passed through coordinator") + return false } - // Not holding the mutex, however, we are blocking the main processing loop. - // No more lookups will occur will this is rolling out. - // This is by design; there is an optional throttle here. The queue will roll - // out before any new revisions are detected and will slow based on throttle. - // Note: We may want a more sophisticated system that detects new revisions during - // a throttled rollout; but that is TBD. 
- - var throttle *time.Ticker - if m.throttle != time.Duration(0) { - throttle = time.NewTicker(m.throttle) - defer throttle.Stop() + m.mut.Lock() + defer m.mut.Unlock() + + p, ok := m.policies[newPolicy.PolicyId] + if !ok { + p = policyT{ + pp: *pp, + head: makeHead(), + } + m.policies[newPolicy.PolicyId] = p + zlog.Info().Str(logger.PolicyId, newPolicy.PolicyId).Msg("New policy found on update and added") + return false } - start := time.Now() + // Cache the old stored policy for logging + oldPolicy := p.pp.Policy - zlog.Info(). - Int("nSubs", len(subs)). - Dur("throttle", m.throttle). - Msg("policy rollout begin") + // Update the policy in our data structure + p.pp = *pp + m.policies[newPolicy.PolicyId] = p - var err error -LOOP: - for _, s := range subs { - - if throttle != nil { - select { - case <-throttle.C: - case <-ctx.Done(): - err = ctx.Err() - break LOOP + // Iterate through the subscriptions on this policy; + // schedule any subscription for delivery that requires an update. + nQueued := 0 + + iter := NewIterator(p.head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.isUpdate(&newPolicy) { + + // Unlink the target node from the list + iter.Unlink() + + // Push the node onto the pendingQ + // HACK: if update is for cloud agent, put on front of queue + // not at the end for immediate delivery. + if newPolicy.PolicyId == cloudPolicyId { + m.pendingQ.pushFront(sub) + } else { + m.pendingQ.pushBack(sub) } - } - select { - case s.c <- policy: - default: - // Should never block on a channel; we created a channel of size one. - // A block here indicates a logic error somewheres. - zlog.Error(). - Str("policyId", policy.PolicyId). - Msg("should never block on policy channel") - } + zlog.Debug(). + Str(logger.AgentId, sub.agentId). + Msg("scheduled pendingQ on policy revision") + nQueued += 1 + } } zlog.Info(). - Err(err). - Dur("tdiff", time.Since(start)). - Msg("policy rollout end") + Int64("oldRev", oldPolicy.RevisionIdx). 
+ Int64("oldCoord", oldPolicy.CoordinatorIdx). + Int("nQueued", nQueued). + Str(logger.PolicyId, newPolicy.PolicyId). + Msg("New revision of policy received and added to the queue") - return err + return true } -func (m *monitorT) updatePolicy(policy model.Policy) []subT { - m.mut.Lock() - defer m.mut.Unlock() +func (m *monitorT) kickLoad() { - p, ok := m.policies[policy.PolicyId] - if !ok { - p = policyT{ - policy: policy, - subs: make(map[uint64]subT), - } - m.policies[policy.PolicyId] = p - return nil + select { + case m.kickCh <- struct{}{}: + default: + m.log.Debug().Msg("kick channel full") } +} - p.policy = policy - m.policies[policy.PolicyId] = p +func (m *monitorT) kickDeploy() { - if policy.CoordinatorIdx <= 0 { - // don't rollout new policy that has not passed through the coordinator - return nil + select { + case m.deployCh <- struct{}{}: + default: } - - m.log.Info(). - Str("policyId", policy.PolicyId). - Int64("orev", p.policy.RevisionIdx). - Int64("nrev", policy.RevisionIdx). - Int64("ocoord", p.policy.CoordinatorIdx). - Int64("ncoord", policy.CoordinatorIdx). - Msg("new policy") - - subs := make([]subT, 0, len(p.subs)) - for idx, sub := range p.subs { - if p.policy.RevisionIdx > sub.revIdx || - (p.policy.RevisionIdx == sub.revIdx && p.policy.CoordinatorIdx > sub.coordIdx) { - // These subscriptions are one shot; delete from map. - delete(p.subs, idx) - subs = append(subs, sub) - } - } - - return subs } // Subscribe creates a new subscription for a policy update. func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, coordinatorIdx int64) (Subscription, error) { - if _, err := uuid.FromString(policyId); err != nil { - return nil, errors.New("policyId must be a UUID") - } if revisionIdx < 0 { return nil, errors.New("revisionIdx must be greater than or equal to 0") } @@ -290,44 +396,47 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, } m.log.Debug(). - Str("agentId", agentId). 
- Str("policyId", policyId). - Int64("revno", revisionIdx). - Int64("coordno", coordinatorIdx). + Str(logger.AgentId, agentId). + Str(logger.PolicyId, policyId). + Int64("rev", revisionIdx). + Int64("coord", coordinatorIdx). Msg("subscribed to policy monitor") - idx := atomic.AddUint64(&gCounter, 1) - - s := subT{ - idx: idx, - policyId: policyId, - revIdx: revisionIdx, - coordIdx: coordinatorIdx, - c: make(chan model.Policy, 1), - } + s := NewSub( + policyId, + agentId, + revisionIdx, + coordinatorIdx, + ) m.mut.Lock() + defer m.mut.Unlock() p, ok := m.policies[policyId] - if (p.policy.RevisionIdx > revisionIdx && p.policy.CoordinatorIdx > 0) || - (p.policy.RevisionIdx == revisionIdx && p.policy.CoordinatorIdx > coordinatorIdx) { - // fill the channel, clear out id; no point putting it in map as it is already fired - s.idx = 0 - s.c <- p.policy - } else { - if !ok { - p = policyT{subs: make(map[uint64]subT)} - m.policies[policyId] = p - select { - case m.kickCh <- struct{}{}: - default: - m.log.Debug().Msg("kick channel full") - } + + switch { + case !ok: + // We've not seen this policy before, force load. + m.log.Info(). + Str(logger.PolicyId, policyId). + Msg("force load on unknown policyId") + p = policyT{head: makeHead()} + p.head.pushBack(s) + m.policies[policyId] = p + m.kickLoad() + case s.isUpdate(&p.pp.Policy): + empty := m.pendingQ.isEmpty() + m.pendingQ.pushBack(s) + m.log.Debug(). + Str(logger.AgentId, s.agentId). + Msg("scheduled pending on subscribe") + if empty { + m.kickDeploy() } - p.subs[idx] = s + default: + p.head.pushBack(s) } - m.mut.Unlock() - return &s, nil + return s, nil } // Unsubscribe removes the current subscription. 
@@ -336,15 +445,17 @@ func (m *monitorT) Unsubscribe(sub Subscription) error { if !ok { return errors.New("not a subscription returned from this monitor") } - if s.idx == 0 { - return nil - } m.mut.Lock() - if policy, ok := m.policies[s.policyId]; ok { - delete(policy.subs, s.idx) - } + s.unlink() m.mut.Unlock() + m.log.Debug(). + Str(logger.AgentId, s.agentId). + Str(logger.PolicyId, s.policyId). + Int64("rev", s.revIdx). + Int64("coord", s.coordIdx). + Msg("unsubscribe") + return nil } diff --git a/internal/pkg/policy/monitor_integration_test.go b/internal/pkg/policy/monitor_integration_test.go index 202cff9e3..94148c50a 100644 --- a/internal/pkg/policy/monitor_integration_test.go +++ b/internal/pkg/policy/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package policy @@ -22,7 +23,7 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -const testMonitorIntervalMS = 100 +var policyBytes = []byte(`{"outputs":{"default":{"type":"elasticsearch"}}}`) func setupIndex(ctx context.Context, t *testing.T) (string, bulk.Bulk) { index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingPolicy) @@ -34,7 +35,7 @@ func TestMonitor_Integration(t *testing.T) { defer cancel() index, bulker := setupIndex(ctx, t) - im, err := monitor.New(index, bulker.Client(), monitor.WithCheckInterval(testMonitorIntervalMS)) + im, err := monitor.New(index, bulker.Client(), bulker.Client()) if err != nil { t.Fatal(err) } @@ -77,7 +78,7 @@ func TestMonitor_Integration(t *testing.T) { policy := model.Policy{ PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } go func() { @@ -92,7 +93,7 @@ func TestMonitor_Integration(t *testing.T) { select { case subPolicy := <-s.Output(): tm.Stop() - if subPolicy.PolicyId != policyId && 
subPolicy.RevisionIdx != 1 && subPolicy.CoordinatorIdx != 1 { + if subPolicy.Policy.PolicyId != policyId && subPolicy.Policy.RevisionIdx != 1 && subPolicy.Policy.CoordinatorIdx != 1 { t.Fatal("failed to get the expected updated policy") } case <-tm.C: diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index 0f21c2491..eac8e4419 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package policy @@ -25,6 +26,8 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) +var policyBytes = []byte(`{"outputs":{"default":{"type":"elasticsearch"}}}`) + func TestMonitor_NewPolicy(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() @@ -45,6 +48,10 @@ func TestMonitor_NewPolicy(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*monitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + agentId := uuid.Must(uuid.NewV4()).String() policyId := uuid.Must(uuid.NewV4()).String() s, err := monitor.Subscribe(agentId, policyId, 0, 0) @@ -62,7 +69,7 @@ func TestMonitor_NewPolicy(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } policyData, err := json.Marshal(&policy) @@ -85,7 +92,7 @@ func TestMonitor_NewPolicy(t *testing.T) { select { case subPolicy := <-s.Output(): tm.Stop() - diff := cmp.Diff(policy, subPolicy) + diff := cmp.Diff(policy, subPolicy.Policy) if diff != "" { t.Fatal(diff) } @@ -140,7 +147,7 @@ func TestMonitor_SamePolicy(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } policyData, err := json.Marshal(&policy) @@ -160,6 +167,7 @@ func TestMonitor_SamePolicy(t *testing.T) 
{ gotPolicy := false tm := time.NewTimer(1 * time.Second) + defer tm.Stop() select { case <-s.Output(): gotPolicy = true @@ -213,7 +221,7 @@ func TestMonitor_NewPolicyUncoordinated(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 0, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 2, } policyData, err := json.Marshal(&policy) @@ -233,6 +241,7 @@ func TestMonitor_NewPolicyUncoordinated(t *testing.T) { gotPolicy := false tm := time.NewTimer(1 * time.Second) + defer tm.Stop() select { case <-s.Output(): gotPolicy = true @@ -289,7 +298,7 @@ func runTestMonitor_NewPolicyExists(t *testing.T, delay time.Duration) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 2, } @@ -317,7 +326,7 @@ func runTestMonitor_NewPolicyExists(t *testing.T, delay time.Duration) { select { case subPolicy := <-s.Output(): tm.Stop() - diff := cmp.Diff(policy, subPolicy) + diff := cmp.Diff(policy, subPolicy.Policy) if diff != "" { t.Fatal(diff) } diff --git a/internal/pkg/policy/parsed_policy.go b/internal/pkg/policy/parsed_policy.go new file mode 100644 index 000000000..75160eac5 --- /dev/null +++ b/internal/pkg/policy/parsed_policy.go @@ -0,0 +1,163 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package policy + +import ( + "encoding/json" + "errors" + + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/smap" +) + +const ( + FieldOutputs = "outputs" + FieldOutputType = "type" + FieldOutputFleetServer = "fleet_server" + FieldOutputServiceToken = "service_token" + FieldOutputPermissions = "output_permissions" + + OutputTypeElasticsearch = "elasticsearch" +) + +var ( + ErrOutputsNotFound = errors.New("outputs not found") + ErrDefaultOutputNotFound = errors.New("default output not found") + ErrMultipleDefaultOutputsFound = errors.New("multiple default outputs found") + ErrInvalidPermissionsFormat = errors.New("invalid permissions format") +) + +type RoleT struct { + Raw []byte + Sha2 string +} + +type RoleMapT map[string]RoleT + +type ParsedPolicyDefaults struct { + Name string + Role *RoleT +} + +type ParsedPolicy struct { + Policy model.Policy + Fields map[string]json.RawMessage + Roles RoleMapT + Default ParsedPolicyDefaults +} + +func NewParsedPolicy(p model.Policy) (*ParsedPolicy, error) { + var err error + + var fields map[string]json.RawMessage + if err = json.Unmarshal(p.Data, &fields); err != nil { + return nil, err + } + + // Interpret the output permissions if available + var roles map[string]RoleT + if perms, _ := fields[FieldOutputPermissions]; len(perms) != 0 { + if roles, err = parsePerms(perms); err != nil { + return nil, err + } + } + + // Find the default role. 
+ outputs, ok := fields[FieldOutputs] + if !ok { + return nil, ErrOutputsNotFound + } + defaultName, err := findDefaultOutputName(outputs) + if err != nil { + return nil, err + } + var roleP *RoleT + if role, ok := roles[defaultName]; ok { + roleP = &role + } + + // We are cool and the gang + pp := &ParsedPolicy{ + Policy: p, + Fields: fields, + Roles: roles, + Default: ParsedPolicyDefaults{ + Name: defaultName, + Role: roleP, + }, + } + + return pp, nil +} + +func parsePerms(permsRaw json.RawMessage) (RoleMapT, error) { + permMap, err := smap.Parse(permsRaw) + if err != nil { + return nil, err + } + + // iterate across the keys + m := make(RoleMapT, len(permMap)) + for k := range permMap { + + v := permMap.GetMap(k) + + if v != nil { + var r RoleT + + // Stable hash on permissions payload + if r.Sha2, err = v.Hash(); err != nil { + return nil, err + } + + // Re-marshal, the payload for each section + if r.Raw, err = json.Marshal(v); err != nil { + return nil, err + } + m[k] = r + } + } + + return m, nil +} + +func findDefaultOutputName(outputsRaw json.RawMessage) (string, error) { + outputsMap, err := smap.Parse(outputsRaw) + if err != nil { + return "", err + } + + // iterate across the keys finding the defaults + var defaults []string + for k := range outputsMap { + + v := outputsMap.GetMap(k) + + if v != nil { + outputType := v.GetString(FieldOutputType) + if outputType != OutputTypeElasticsearch { + continue + } + fleetServer := v.GetMap(FieldOutputFleetServer) + if fleetServer == nil { + defaults = append(defaults, k) + continue + } + serviceToken := fleetServer.GetString(FieldOutputServiceToken) + if serviceToken == "" { + defaults = append(defaults, k) + continue + } + } + } + + if len(defaults) == 0 { + return "", ErrDefaultOutputNotFound + } + if len(defaults) == 1 { + return defaults[0], nil + } + return "", ErrMultipleDefaultOutputsFound +} diff --git a/internal/pkg/policy/parsed_policy_test.go b/internal/pkg/policy/parsed_policy_test.go new file mode 
100644 index 000000000..116f8948e --- /dev/null +++ b/internal/pkg/policy/parsed_policy_test.go @@ -0,0 +1,526 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package policy + +import ( + "encoding/json" + "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "testing" +) + +const testPolicy = ` +{ + "id": "63f4e6d0-9626-11eb-b486-6de1529a4151", + "revision": 33, + "outputs": { + "other": { + "type": "elasticsearch", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ], + "fleet_server": {} + }, + "remote_not_es": { + "type": "logstash", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ] + }, + "remote_with_token": { + "type": "elasticsearch", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ], + "fleet_server": { + "service_token": "abc123" + } + } + }, + "output_permissions": { + "other": { + "_fallback": { + "cluster": [ + "monitor" + ], + "indices": [ + { + "names": [ + "logs-*", + "metrics-*", + "traces-*", + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "auto_configure", + "create_doc" + ] + } + ] + } + } + }, + "agent": { + "monitoring": { + "enabled": true, + "use_output": "other", + "logs": true, + "metrics": true + } + }, + "inputs": [ + { + "id": "278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "name": "system-1", + "revision": 2, + "type": "logfile", + "use_output": "other", + "meta": { + "package": { + "name": "system", + "version": "0.11.2" + } + }, + "data_stream": { + "namespace": "default" + }, + "streams": [ + { + "id": "logfile-system.auth-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.auth", + "type": "logs" + }, + "exclude_files": [ + ".gz$" + ], + "paths": [ + 
"/var/log/auth.log*", + "/var/log/secure*" + ], + "multiline": { + "pattern": "^\\s", + "match": "after" + }, + "processors": [ + { + "add_locale": null + }, + { + "add_fields": { + "fields": { + "ecs.version": "1.8.0" + }, + "target": "" + } + } + ] + }, + { + "id": "logfile-system.syslog-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.syslog", + "type": "logs" + }, + "exclude_files": [ + ".gz$" + ], + "paths": [ + "/var/log/messages*", + "/var/log/syslog*" + ], + "multiline": { + "pattern": "^\\s", + "match": "after" + }, + "processors": [ + { + "add_locale": null + }, + { + "add_fields": { + "fields": { + "ecs.version": "1.5.0" + }, + "target": "" + } + } + ] + } + ] + }, + { + "id": "278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "name": "system-1", + "revision": 2, + "type": "system/metrics", + "use_output": "other", + "meta": { + "package": { + "name": "system", + "version": "0.11.2" + } + }, + "data_stream": { + "namespace": "default" + }, + "streams": [ + { + "id": "system/metrics-system.cpu-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.cpu", + "type": "metrics" + }, + "period": "10s", + "cpu.metrics": [ + "percentages", + "normalized_percentages" + ], + "metricsets": [ + "cpu" + ] + }, + { + "id": "system/metrics-system.diskio-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.diskio", + "type": "metrics" + }, + "period": "10s", + "diskio.include_devices": null, + "metricsets": [ + "diskio" + ] + }, + { + "id": "system/metrics-system.filesystem-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.filesystem", + "type": "metrics" + }, + "period": "1m", + "metricsets": [ + "filesystem" + ], + "processors": [ + { + "drop_event.when.regexp": { + "system.filesystem.mount_point": "^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)" + } + } + ] + }, + { + "id": "system/metrics-system.fsstat-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + 
"dataset": "system.fsstat", + "type": "metrics" + }, + "period": "1m", + "metricsets": [ + "fsstat" + ], + "processors": [ + { + "drop_event.when.regexp": { + "system.fsstat.mount_point": "^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)" + } + } + ] + }, + { + "id": "system/metrics-system.load-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.load", + "type": "metrics" + }, + "condition": "${host.platform} != 'windows'", + "period": "10s", + "metricsets": [ + "load" + ] + }, + { + "id": "system/metrics-system.memory-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.memory", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "memory" + ] + }, + { + "id": "system/metrics-system.network-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.network", + "type": "metrics" + }, + "period": "10s", + "network.interfaces": null, + "metricsets": [ + "network" + ] + }, + { + "id": "system/metrics-system.process-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.process", + "type": "metrics" + }, + "process.include_top_n.by_memory": 5, + "period": "10s", + "processes": [ + ".*" + ], + "process.include_top_n.by_cpu": 5, + "process.cgroups.enabled": false, + "process.cmdline.cache.enabled": true, + "metricsets": [ + "process" + ], + "process.include_cpu_ticks": false + }, + { + "id": "system/metrics-system.process_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.process_summary", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "process_summary" + ] + }, + { + "id": "system/metrics-system.socket_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.socket_summary", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "socket_summary" + ] + }, + { + "id": "system/metrics-system.uptime-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": 
"system.uptime", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "uptime" + ] + } + ] + }, + { + "id": "74abb3e2-a041-4684-8b3d-09e0e5eacd36", + "name": "Endgame", + "revision": 28, + "type": "endpoint", + "use_output": "other", + "meta": { + "package": { + "name": "endpoint", + "version": "0.18.0" + } + }, + "data_stream": { + "namespace": "default" + }, + "artifact_manifest": { + "schema_version": "v1", + "manifest_version": "1.0.28", + "artifacts": { + "endpoint-trustlist-windows-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-trustlist-windows-v1/74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf", + "compression_algorithm": "zlib", + "decoded_size": 338, + "decoded_sha256": "74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf", + "encryption_algorithm": "none", + "encoded_sha256": "8e70ce05d25709b6bbd4fd6981e86e24e1a2f85e3f69d2733058c568830f25d2", + "encoded_size": 185 + }, + "endpoint-trustlist-macos-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-trustlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-exceptionlist-macos-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-exceptionlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-trustlist-linux-v1": { + "relative_url": 
"/api/endpoint/artifacts/download/endpoint-trustlist-linux-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-exceptionlist-windows-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-exceptionlist-windows-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + } + } + }, + "policy": { + "linux": { + "logging": { + "file": "info" + }, + "events": { + "process": true, + "file": true, + "network": true + } + }, + "windows": { + "popup": { + "malware": { + "enabled": true, + "message": "" + }, + "ransomware": { + "enabled": true, + "message": "" + } + }, + "malware": { + "mode": "prevent" + }, + "logging": { + "file": "info" + }, + "antivirus_registration": { + "enabled": false + }, + "events": { + "registry": true, + "process": true, + "security": true, + "file": true, + "dns": true, + "dll_and_driver_load": true, + "network": true + }, + "ransomware": { + "mode": "prevent" + } + }, + "mac": { + "popup": { + "malware": { + "enabled": true, + "message": "" + } + }, + "malware": { + "mode": "prevent" + }, + "logging": { + "file": "info" + }, + "events": { + "process": true, + "file": true, + "network": true + } + } + } + } + ], + "fleet": { + "hosts": [ + "http://10.128.0.4:8220" + ] + } +} +` + +const minified = ` 
+{"id":"63f4e6d0-9626-11eb-b486-6de1529a4151","revision":33,"outputs":{"default":{"type":"elasticsearch","hosts":["https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443"]}},"output_permissions":{"default":{"_fallback":{"cluster":["monitor"],"indices":[{"names":["logs-*","metrics-*","traces-*",".logs-endpoint.diagnostic.collection-*"],"privileges":["auto_configure","create_doc"]}]}}},"agent":{"monitoring":{"enabled":true,"use_output":"default","logs":true,"metrics":true}},"inputs":[{"id":"278c54f2-f62c-4efd-b4f8-50d14c4ee337","name":"system-1","revision":2,"type":"logfile","use_output":"default","meta":{"package":{"name":"system","version":"0.11.2"}},"data_stream":{"namespace":"default"},"streams":[{"id":"logfile-system.auth-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.auth","type":"logs"},"exclude_files":[".gz$"],"paths":["/var/log/auth.log*","/var/log/secure*"],"multiline":{"pattern":"^\\s","match":"after"},"processors":[{"add_locale":null},{"add_fields":{"fields":{"ecs.version":"1.8.0"},"target":""}}]},{"id":"logfile-system.syslog-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.syslog","type":"logs"},"exclude_files":[".gz$"],"paths":["/var/log/messages*","/var/log/syslog*"],"multiline":{"pattern":"^\\s","match":"after"},"processors":[{"add_locale":null},{"add_fields":{"fields":{"ecs.version":"1.5.0"},"target":""}}]}]},{"id":"278c54f2-f62c-4efd-b4f8-50d14c4ee337","name":"system-1","revision":2,"type":"system/metrics","use_output":"default","meta":{"package":{"name":"system","version":"0.11.2"}},"data_stream":{"namespace":"default"},"streams":[{"id":"system/metrics-system.cpu-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.cpu","type":"metrics"},"period":"10s","cpu.metrics":["percentages","normalized_percentages"],"metricsets":["cpu"]},{"id":"system/metrics-system.diskio-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.diskio","type":"metrics"},"period":"1
0s","diskio.include_devices":null,"metricsets":["diskio"]},{"id":"system/metrics-system.filesystem-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.filesystem","type":"metrics"},"period":"1m","metricsets":["filesystem"],"processors":[{"drop_event.when.regexp":{"system.filesystem.mount_point":"^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)"}}]},{"id":"system/metrics-system.fsstat-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.fsstat","type":"metrics"},"period":"1m","metricsets":["fsstat"],"processors":[{"drop_event.when.regexp":{"system.fsstat.mount_point":"^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)"}}]},{"id":"system/metrics-system.load-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.load","type":"metrics"},"condition":"${host.platform} != 'windows'","period":"10s","metricsets":["load"]},{"id":"system/metrics-system.memory-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.memory","type":"metrics"},"period":"10s","metricsets":["memory"]},{"id":"system/metrics-system.network-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.network","type":"metrics"},"period":"10s","network.interfaces":null,"metricsets":["network"]},{"id":"system/metrics-system.process-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.process","type":"metrics"},"process.include_top_n.by_memory":5,"period":"10s","processes":[".*"],"process.include_top_n.by_cpu":5,"process.cgroups.enabled":false,"process.cmdline.cache.enabled":true,"metricsets":["process"],"process.include_cpu_ticks":false},{"id":"system/metrics-system.process_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.process_summary","type":"metrics"},"period":"10s","metricsets":["process_summary"]},{"id":"system/metrics-system.socket_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.socket_summary","type":"metrics"},"period":"10s","metricsets":["socket_
summary"]},{"id":"system/metrics-system.uptime-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.uptime","type":"metrics"},"period":"10s","metricsets":["uptime"]}]},{"id":"74abb3e2-a041-4684-8b3d-09e0e5eacd36","name":"Endgame","revision":28,"type":"endpoint","use_output":"default","meta":{"package":{"name":"endpoint","version":"0.18.0"}},"data_stream":{"namespace":"default"},"artifact_manifest":{"schema_version":"v1","manifest_version":"1.0.28","artifacts":{"endpoint-trustlist-windows-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-windows-v1/74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf","compression_algorithm":"zlib","decoded_size":338,"decoded_sha256":"74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf","encryption_algorithm":"none","encoded_sha256":"8e70ce05d25709b6bbd4fd6981e86e24e1a2f85e3f69d2733058c568830f25d2","encoded_size":185},"endpoint-trustlist-macos-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-exceptionlist-macos-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-exceptionlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-trustlist-linux-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-linux-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compress
ion_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-exceptionlist-windows-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-exceptionlist-windows-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22}}},"policy":{"linux":{"logging":{"file":"info"},"events":{"process":true,"file":true,"network":true}},"windows":{"popup":{"malware":{"enabled":true,"message":""},"ransomware":{"enabled":true,"message":""}},"malware":{"mode":"prevent"},"logging":{"file":"info"},"antivirus_registration":{"enabled":false},"events":{"registry":true,"process":true,"security":true,"file":true,"dns":true,"dll_and_driver_load":true,"network":true},"ransomware":{"mode":"prevent"}},"mac":{"popup":{"malware":{"enabled":true,"message":""}},"malware":{"mode":"prevent"},"logging":{"file":"info"},"events":{"process":true,"file":true,"network":true}}}}],"fleet":{"hosts":["http://10.128.0.4:8220"]}}` + +func TestNewParsedPolicy(t *testing.T) { + + // Run two formatting of the same payload to validate that the sha2 remains the same + payloads := []string{ + testPolicy, + minified, + } + + for _, payload := range payloads { + // Load the model into the policy object + var m model.Policy + if err := json.Unmarshal([]byte(payload), &m); err != nil { + t.Fatal(err) + } + + m.Data = json.RawMessage(testPolicy) + + pp, err := NewParsedPolicy(m) + if err != nil { + t.Fatal(err) + } + + fields := []string{ + "id", + "revision", + "outputs", + "output_permissions", + "agent", + "inputs", + "fleet", + } + + // 
Validate the fields; Expect the following top level items + if len(pp.Fields) != len(fields) { + t.Error("Expected N fields") + } + + for _, f := range fields { + if _, ok := pp.Fields[f]; !ok { + t.Error(fmt.Sprintf("Missing field %s", f)) + } + } + + // Now validate output perms hash + if len(pp.Roles) != 1 { + t.Error("Only expected one role") + } + + // Validate that default was found + if pp.Default.Name != "other" { + t.Error("other output should be identified as default") + } + if pp.Default.Role == nil { + t.Error("other output role should be identified") + } + + expectedSha2 := "d4d0840fe28ca4900129a749b56cee729562c0a88c935192c659252b5b0d762a" + if pp.Default.Role.Sha2 != expectedSha2 { + t.Fatal(fmt.Sprintf("Expected sha2: '%s', got '%s'.", expectedSha2, pp.Default.Role.Sha2)) + } + } +} diff --git a/internal/pkg/policy/revision.go b/internal/pkg/policy/revision.go index 11ff34380..506ce5462 100644 --- a/internal/pkg/policy/revision.go +++ b/internal/pkg/policy/revision.go @@ -10,8 +10,6 @@ import ( "strings" "github.com/elastic/fleet-server/v7/internal/pkg/model" - - "github.com/gofrs/uuid" ) // Revision is a policy revision that is sent as an action ID to an agent. @@ -39,9 +37,6 @@ func RevisionFromString(actionId string) (Revision, bool) { if split[0] != "policy" { return Revision{}, false } - if _, err := uuid.FromString(split[1]); err != nil { - return Revision{}, false - } revIdx, err := strconv.ParseInt(split[2], 10, 64) if err != nil { return Revision{}, false diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go new file mode 100644 index 000000000..f1b87734e --- /dev/null +++ b/internal/pkg/policy/self.go @@ -0,0 +1,296 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package policy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "sync" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/status" +) + +// DefaultCheckTime is the default interval for self to check for its policy. +const DefaultCheckTime = 5 * time.Second + +type enrollmentTokenFetcher func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) + +type SelfMonitor interface { + // Run runs the monitor. + Run(ctx context.Context) error + // Status gets current status of monitor. + Status() proto.StateObserved_Status +} + +type selfMonitorT struct { + log zerolog.Logger + + mut sync.Mutex + fleet config.Fleet + bulker bulk.Bulk + monitor monitor.Monitor + + policyId string + status proto.StateObserved_Status + reporter status.Reporter + + policy *model.Policy + + policyF policyFetcher + policiesIndex string + enrollmentTokenF enrollmentTokenFetcher + checkTime time.Duration + + startCh chan struct{} +} + +// NewSelfMonitor creates the self policy monitor. +// +// Ensures that the policy that this Fleet Server attached to exists and that it +// has a Fleet Server input defined. 
+func NewSelfMonitor(fleet config.Fleet, bulker bulk.Bulk, monitor monitor.Monitor, policyId string, reporter status.Reporter) SelfMonitor { + return &selfMonitorT{ + log: log.With().Str("ctx", "policy self monitor").Logger(), + fleet: fleet, + bulker: bulker, + monitor: monitor, + policyId: policyId, + status: proto.StateObserved_STARTING, + reporter: reporter, + policyF: dl.QueryLatestPolicies, + policiesIndex: dl.FleetPolicies, + enrollmentTokenF: findEnrollmentAPIKeys, + checkTime: DefaultCheckTime, + startCh: make(chan struct{}), + } +} + +// Run runs the monitor. +func (m *selfMonitorT) Run(ctx context.Context) error { + s := m.monitor.Subscribe() + defer m.monitor.Unsubscribe(s) + + _, err := m.process(ctx) + if err != nil { + return err + } + + cT := time.NewTimer(m.checkTime) + defer cT.Stop() + + close(m.startCh) + +LOOP: + for { + select { + case <-ctx.Done(): + break LOOP + case <-cT.C: + status, err := m.process(ctx) + if err != nil { + return err + } + cT.Reset(m.checkTime) + if status == proto.StateObserved_HEALTHY { + // running; can stop + break LOOP + } + case hits := <-s.Output(): + policies := make([]model.Policy, len(hits)) + for i, hit := range hits { + err := hit.Unmarshal(&policies[i]) + if err != nil { + return err + } + } + status, err := m.processPolicies(ctx, policies) + if err != nil { + return err + } + if status == proto.StateObserved_HEALTHY { + // running; can stop + break LOOP + } + } + } + + return nil +} + +func (m *selfMonitorT) Status() proto.StateObserved_Status { + m.mut.Lock() + defer m.mut.Unlock() + return m.status +} + +func (m *selfMonitorT) waitStart(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + err = ctx.Err() + case <-m.startCh: + } + return +} + +func (m *selfMonitorT) process(ctx context.Context) (proto.StateObserved_Status, error) { + policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) + if err != nil { + if !errors.Is(err, es.ErrIndexNotFound) { + return 
proto.StateObserved_FAILED, nil + } + m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) + } + if len(policies) == 0 { + return m.updateStatus(ctx) + } + return m.processPolicies(ctx, policies) +} + +func (m *selfMonitorT) processPolicies(ctx context.Context, policies []model.Policy) (proto.StateObserved_Status, error) { + if len(policies) == 0 { + // nothing to do + return proto.StateObserved_STARTING, nil + } + latest := m.groupByLatest(policies) + for _, policy := range latest { + if m.policyId != "" && policy.PolicyId == m.policyId { + m.policy = &policy + break + } else if m.policyId == "" && policy.DefaultFleetServer { + m.policy = &policy + break + } + } + return m.updateStatus(ctx) +} + +func (m *selfMonitorT) groupByLatest(policies []model.Policy) map[string]model.Policy { + latest := make(map[string]model.Policy) + for _, policy := range policies { + curr, ok := latest[policy.PolicyId] + if !ok { + latest[policy.PolicyId] = policy + continue + } + if policy.RevisionIdx > curr.RevisionIdx { + latest[policy.PolicyId] = policy + continue + } else if policy.RevisionIdx == curr.RevisionIdx && policy.CoordinatorIdx > curr.CoordinatorIdx { + latest[policy.PolicyId] = policy + } + } + return latest +} + +func (m *selfMonitorT) updateStatus(ctx context.Context) (proto.StateObserved_Status, error) { + m.mut.Lock() + defer m.mut.Unlock() + + if m.policy == nil { + // no policy found + m.status = proto.StateObserved_STARTING + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on default policy with Fleet Server integration", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", m.policyId), nil) + } + return proto.StateObserved_STARTING, nil + } + + var data policyData + err := json.Unmarshal(m.policy.Data, &data) + if err != nil { + return proto.StateObserved_FAILED, err + } + if !data.HasType("fleet-server") { + // no fleet-server 
input + m.status = proto.StateObserved_STARTING + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on fleet-server input to be added to default policy", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on fleet-server input to be added to policy: %s", m.policyId), nil) + } + return proto.StateObserved_STARTING, nil + } + + status := proto.StateObserved_HEALTHY + extendMsg := "" + var payload map[string]interface{} + if m.fleet.Agent.ID == "" { + status = proto.StateObserved_DEGRADED + extendMsg = "; missing config fleet.agent.id (expected during bootstrap process)" + + // Elastic Agent has not been enrolled; Fleet Server passes back the enrollment token so the Elastic Agent + // can perform enrollment. + tokens, err := m.enrollmentTokenF(ctx, m.bulker, m.policy.PolicyId) + if err != nil { + return proto.StateObserved_FAILED, err + } + tokens = filterActiveTokens(tokens) + if len(tokens) == 0 { + // no tokens created for the policy, still starting + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on active enrollment keys to be created in default policy with Fleet Server integration", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on active enrollment keys to be created in policy with Fleet Server integration: %s", m.policyId), nil) + } + return proto.StateObserved_STARTING, nil + } + payload = map[string]interface{}{ + "enrollment_token": tokens[0].ApiKey, + } + } + m.status = status + if m.policyId == "" { + m.reporter.Status(status, fmt.Sprintf("Running on default policy with Fleet Server integration%s", extendMsg), payload) + } else { + m.reporter.Status(status, fmt.Sprintf("Running on policy with Fleet Server integration: %s%s", m.policyId, extendMsg), payload) + } + return status, nil +} + +type policyData struct { + Inputs []policyInput `json:"inputs"` +} + +type policyInput struct { + Type string `json:"type"` +} + 
+func (d *policyData) HasType(val string) bool { + for _, input := range d.Inputs { + if input.Type == val { + return true + } + } + return false +} + +func findEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + return dl.FindEnrollmentAPIKeys(ctx, bulker, dl.QueryEnrollmentAPIKeyByPolicyID, dl.FieldPolicyId, policyID) +} + +func filterActiveTokens(tokens []model.EnrollmentApiKey) []model.EnrollmentApiKey { + active := make([]model.EnrollmentApiKey, 0, len(tokens)) + for _, t := range tokens { + if t.Active { + active = append(active, t) + } + } + return active +} diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go new file mode 100644 index 000000000..38dcc7b4a --- /dev/null +++ b/internal/pkg/policy/self_test.go @@ -0,0 +1,671 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +//go:build !integration +// +build !integration + +package policy + +import ( + "context" + "encoding/json" + "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "sync" + "testing" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/gofrs/uuid" + "github.com/rs/xid" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/monitor/mock" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" +) + +func TestSelfMonitor_DefaultPolicy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "agent-id", + }, + } + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, "", reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + policyId := uuid.Must(uuid.NewV4()).String() + rId := xid.New().String() + policyContents, err := 
json.Marshal(&policyData{Inputs: []policyInput{}}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + pData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: pData, + }, + }) + }() + + // should still be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on fleet-server input to be added to default policy" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }) + + rId = xid.New().String() + policyContents, err = json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy = model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 2, + DefaultFleetServer: true, + } + pData, err = json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 2, + Version: 1, + Source: pData, + }, + }) + }() + + // should now be set to healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + if msg != "Running on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + 
t.Fatal(merr) + } +} + +func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "", + }, + } + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, "", reporter) + sm := monitor.(*selfMonitorT) + sm.checkTime = 100 * time.Millisecond + + var policyLock sync.Mutex + var policyResult []model.Policy + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + policyLock.Lock() + defer policyLock.Unlock() + return policyResult, nil + } + + var tokenLock sync.Mutex + var tokenResult []model.EnrollmentApiKey + sm.enrollmentTokenF = func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + tokenLock.Lock() + defer tokenLock.Unlock() + return tokenResult, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + policyId := uuid.Must(uuid.NewV4()).String() + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + 
Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + + // add inactive token that should be filtered out + inactiveToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: false, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Inactive", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, inactiveToken) + tokenLock.Unlock() + + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + policyLock.Lock() + defer policyLock.Unlock() + policyResult = append(policyResult, policy) + }() + + // should be set to starting because of missing active enrollment keys + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on active enrollment keys to be created in default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + // add an active token + activeToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: true, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Active", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, activeToken) + tokenLock.Unlock() + + // should now be set to degraded + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, payload := reporter.Current() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as degraded; instead its %s", status) + } + if msg != "Running 
on default policy with Fleet Server integration; missing config fleet.agent.id (expected during bootstrap process)" { + return fmt.Errorf("should be matching with default policy") + } + if payload == nil { + return fmt.Errorf("payload should not be nil") + } + token, set := payload["enrollment_token"] + if !set { + return fmt.Errorf("payload should have enrollment-token set") + } + if token != activeToken.ApiKey { + return fmt.Errorf("enrollment_token value is incorrect") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +func TestSelfMonitor_SpecificPolicy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "agent-id", + }, + } + policyId := uuid.Must(uuid.NewV4()).String() + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, policyId, reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{}}) + if err != nil { + t.Fatal(err) + 
} + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 2, + DefaultFleetServer: true, + } + pData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: pData, + }, + }) + }() + + // should still be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on fleet-server input to be added to policy: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId = xid.New().String() + policyContents, err = json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy = model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 2, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + pData, err = json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 2, + Version: 1, + Source: pData, + }, + }) + }() + + // should now be set to healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) 
+ } +} + +func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "", + }, + } + policyId := uuid.Must(uuid.NewV4()).String() + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, policyId, reporter) + sm := monitor.(*selfMonitorT) + sm.checkTime = 100 * time.Millisecond + + var policyLock sync.Mutex + var policyResult []model.Policy + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + policyLock.Lock() + defer policyLock.Unlock() + return policyResult, nil + } + + var tokenLock sync.Mutex + var tokenResult []model.EnrollmentApiKey + sm.enrollmentTokenF = func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + tokenLock.Lock() + defer tokenLock.Unlock() + return tokenResult, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + 
CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + + // add inactive token that should be filtered out + inactiveToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: false, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Inactive", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, inactiveToken) + tokenLock.Unlock() + + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + policyLock.Lock() + defer policyLock.Unlock() + policyResult = append(policyResult, policy) + }() + + // should be set to starting because of missing active enrollment keys + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on active enrollment keys to be created in policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + // add an active token + activeToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: true, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Active", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, activeToken) + tokenLock.Unlock() + + // should now be set to degraded + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, payload := reporter.Current() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as degraded; instead 
its %s", status) + } + if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s; missing config fleet.agent.id (expected during bootstrap process)", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + if payload == nil { + return fmt.Errorf("payload should not be nil") + } + token, set := payload["enrollment_token"] + if !set { + return fmt.Errorf("payload should have enrollment-token set") + } + if token != activeToken.ApiKey { + return fmt.Errorf("enrollment_token value is incorrect") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +type FakeReporter struct { + lock sync.Mutex + status proto.StateObserved_Status + msg string + payload map[string]interface{} +} + +func (r *FakeReporter) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error { + r.lock.Lock() + defer r.lock.Unlock() + r.status = status + r.msg = message + r.payload = payload + return nil +} + +func (r *FakeReporter) Current() (proto.StateObserved_Status, string, map[string]interface{}) { + r.lock.Lock() + defer r.lock.Unlock() + return r.status, r.msg, r.payload +} diff --git a/internal/pkg/policy/sub.go b/internal/pkg/policy/sub.go new file mode 100644 index 000000000..65f37188d --- /dev/null +++ b/internal/pkg/policy/sub.go @@ -0,0 +1,117 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package policy + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/model" +) + +type subT struct { + policyId string + agentId string // not logically necessary; cached for logging + revIdx int64 + coordIdx int64 + + next *subT + prev *subT + + ch chan *ParsedPolicy +} + +func NewSub(policyId, agentId string, revIdx, coordIdx int64) *subT { + return &subT{ + policyId: policyId, + agentId: agentId, + revIdx: revIdx, + coordIdx: coordIdx, + ch: make(chan *ParsedPolicy, 1), + } +} + +func makeHead() *subT { + sub := &subT{} + sub.next = sub + sub.prev = sub + return sub +} + +func (n *subT) pushFront(nn *subT) { + nn.next = n.next + nn.prev = n + n.next.prev = nn + n.next = nn +} + +func (n *subT) pushBack(nn *subT) { + nn.next = n + nn.prev = n.prev + n.prev.next = nn + n.prev = nn +} + +func (n *subT) popFront() *subT { + if n.next == n { + return nil + } + s := n.next + s.unlink() + return s +} + +func (n *subT) unlink() bool { + if n.next == nil || n.prev == nil { + return false + } + + n.prev.next = n.next + n.next.prev = n.prev + n.next = nil + n.prev = nil + return true +} + +func (n *subT) isEmpty() bool { + return n.next == n +} + +func (s *subT) isUpdate(policy *model.Policy) bool { + + pRevIdx := policy.RevisionIdx + pCoordIdx := policy.CoordinatorIdx + + return (pRevIdx > s.revIdx && pCoordIdx > 0) || (pRevIdx == s.revIdx && pCoordIdx > s.coordIdx) +} + +// Output returns a new policy that needs to be sent based on the current subscription. 
+func (sub *subT) Output() <-chan *ParsedPolicy { + return sub.ch +} + +type subIterT struct { + head *subT + cur *subT +} + +func NewIterator(head *subT) *subIterT { + return &subIterT{ + head: head, + cur: head, + } +} + +func (it *subIterT) Next() *subT { + next := it.cur.next + if next == it.head { + return nil + } + it.cur = next + return next +} + +func (it *subIterT) Unlink() { + prev := it.cur.prev + it.cur.unlink() + it.cur = prev +} diff --git a/internal/pkg/policy/sub_test.go b/internal/pkg/policy/sub_test.go new file mode 100644 index 000000000..770225454 --- /dev/null +++ b/internal/pkg/policy/sub_test.go @@ -0,0 +1,242 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +//go:build !integration +// +build !integration + +package policy + +import ( + "fmt" + "math/rand" + "testing" +) + +// Base case, should be empty +func TestSub_Empty(t *testing.T) { + + head := makeHead() + + if !head.isEmpty() { + t.Error("Expected empty list with only head") + } +} + +// Iteratively pushBack n items up to N. +// Validate order on popFront. 
+func TestSub_PushBackN(t *testing.T) { + + head := makeHead() + + N := 32 + + for n := 1; n <= N; n++ { + + nodes := make([]*subT, 0, n) + for i := 0; i < n; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushBack(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + // Validate all there + j := 0 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } + + for i := 0; i < n; i++ { + + sub := head.popFront() + if sub.policyId != nodes[i].policyId { + t.Error("misalign on popFront") + } + } + + if !head.isEmpty() { + t.Error("Expect empty list after popFront") + } + + } +} + +// Iteratively pushFront n items up to N. +// Validate order on popFront. +func TestSub_PushFrontN(t *testing.T) { + + head := makeHead() + + N := 32 + + for n := 1; n <= N; n++ { + + nodes := make([]*subT, 0, n) + for i := 0; i < n; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushFront(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + // Validate all there + j := n - 1 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j - 1 + } + + for i := 0; i < n; i++ { + + sub := head.popFront() + if sub.policyId != nodes[n-i-1].policyId { + t.Error("misalign on popFront") + } + } + + if !head.isEmpty() { + t.Error("Expect empty list after popFront") + } + + } +} + +// Push either to front or back randomly. Validate order. 
+func TestSub_PushRandom(t *testing.T) { + + head := makeHead() + + N := rand.Intn(4096) + 1 + + nodes := make([]*subT, 0, N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + + if rand.Intn(2) == 1 { + head.pushBack(nn) + nodes = append(nodes, nn) + } else { + head.pushFront(nn) + nodes = append([]*subT{nn}, nodes...) + } + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + j := 0 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } +} + +// Generate N nodes. Unlink randomly. +// Validate order on each unlink. +func TestSub_UnlinkRandomN(t *testing.T) { + + head := makeHead() + + N := rand.Intn(4096) + 1 + + nodes := make([]*subT, 0, N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushBack(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + for i := 0; i < N; i++ { + idx := rand.Intn(len(nodes)) + sub := nodes[idx] + sub.unlink() + nodes = append(nodes[:idx], nodes[idx+1:]...) 
+ + j := 0 + iter := NewIterator(head) + for sub = iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } + } + + if !head.isEmpty() { + t.Error("head should be empty") + } +} + +func BenchmarkSubsSimple(b *testing.B) { + + head := makeHead() + nn := NewSub("", "", 0, 0) + for i := 0; i < b.N; i++ { + head.pushBack(nn) + head.popFront() + } +} + +func BenchmarkSubs(b *testing.B) { + benchmarks := []int{ + 32, + 1024, + 2048, + 65536, + 131072, + 524288, + } + + max := benchmarks[len(benchmarks)-1] + + head := makeHead() + subs := make([]*subT, 0, max) + + for i := 0; i < max; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + subs = append(subs, nn) + } + + for _, bm := range benchmarks { + b.Run(fmt.Sprintf("%d", bm), func(b *testing.B) { + + for i := 0; i < b.N; i++ { + for j := 0; j < bm; j++ { + head.pushBack(subs[j]) + } + + for j := 0; j < bm; j++ { + subs[j].unlink() + } + } + + }) + } +} diff --git a/internal/pkg/rate/rate.go b/internal/pkg/rate/rate.go deleted file mode 100644 index 0b2d45cc4..000000000 --- a/internal/pkg/rate/rate.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package rate - -// Listener limited by leaky bucket. -// TODO: Not enamored with this. More complicated than necessary. 
- -import ( - "context" - "net" - "time" - - xr "golang.org/x/time/rate" -) - -type rateListener struct { - net.Listener - lim *xr.Limiter - - ctx context.Context - cancelF context.CancelFunc -} - -func NewRateListener(ctx context.Context, l net.Listener, burst int, interval time.Duration) net.Listener { - - ctx, cfunc := context.WithCancel(ctx) - - return &rateListener{ - Listener: l, - lim: xr.NewLimiter(xr.Every(interval), burst), - ctx: ctx, - cancelF: cfunc, - } -} - -func (r *rateListener) Accept() (net.Conn, error) { - if err := r.lim.Wait(r.ctx); err != nil { - return nil, err - } - - return r.Listener.Accept() -} - -func (r *rateListener) Close() error { - r.cancelF() - return r.Listener.Close() -} diff --git a/internal/pkg/saved/crud.go b/internal/pkg/saved/crud.go deleted file mode 100644 index a3fd2aabf..000000000 --- a/internal/pkg/saved/crud.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package saved - -import ( - "context" - "encoding/json" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" - - "github.com/elastic/go-elasticsearch/v8" - "github.com/rs/zerolog/log" -) - -const ( - kIndexKibana = ".kibana*" - kMigrationVersion = "7.9.0" // TODO: bring in during build -) - -type Hit struct { - Id string - Type string - Space string - References []string - UpdatedAt string - Data json.RawMessage -} - -type UpdateT struct { - Id string - Type string - Fields map[string]interface{} -} - -type CRUD interface { - Create(ctx context.Context, ty string, src interface{}, opts ...Option) (id string, err error) - Read(ctx context.Context, ty, id string, dst interface{}, opts ...Option) error - - // AAD or Encrypted fields not supported; you will break your saved object; don't do that. - Update(ctx context.Context, ty, id string, fields map[string]interface{}, opts ...Option) error - MUpdate(ctx context.Context, updates []UpdateT, opts ...Option) error - - FindByField(ctx context.Context, ty string, fields map[string]interface{}) ([]Hit, error) - FindByNode(ctx context.Context, node *dsl.Node) ([]Hit, error) - FindRaw(ctx context.Context, json []byte) ([]Hit, error) - Decode(hit Hit, dst interface{}) error - - Client() *elasticsearch.Client -} - -type mgr struct { - idx bulk.Bulk - key string -} - -func NewMgr(idx bulk.Bulk, key string) CRUD { - return &mgr{idx, key} -} - -func (m *mgr) Client() *elasticsearch.Client { - return m.idx.Client() -} - -func (m *mgr) Create(ctx context.Context, ty string, src interface{}, options ...Option) (id string, err error) { - opts, err := processOpts(options...) 
- - if err != nil { - return - } - - if err = validateType(ty); err != nil { - return - } - - if id, err = genID(opts); err != nil { - return - } - - var data []byte - if data, err = m.encode(ty, id, opts.Space, src); err != nil { - return - } - - docID := fmtID(ty, id, opts.Space) - - nowStr := time.Now().UTC().Format(time.RFC3339) - - // TODO: hardcoded migration version - var objMap = map[string]interface{}{ - ty: json.RawMessage(data), - "type": ty, - "updated_at": nowStr, - "migrationVersion": map[string]string{ - "config": kMigrationVersion, - }, - "references": opts.References, - } - - if opts.Space != "" { - objMap["namespace"] = opts.Space - } - - var source []byte - if source, err = json.Marshal(objMap); err != nil { - return - } - - bulkOpts := m.makeBulkOpts(opts) - - if opts.Overwrite { - id, err = m.idx.Index(ctx, kIndexKibana, docID, source, bulkOpts...) - } else { - id, err = m.idx.Create(ctx, kIndexKibana, docID, source, bulkOpts...) - } - - log.Trace().Err(err).RawJSON("source", source).Msg("On create") - - return -} - -func (m *mgr) makeBulkOpts(opts optionsT) []bulk.Opt { - var bulkOpts []bulk.Opt - if opts.Refresh { - bulkOpts = append(bulkOpts, bulk.WithRefresh()) - } - return bulkOpts -} - -func (m *mgr) Read(ctx context.Context, ty, id string, dst interface{}, options ...Option) error { - opts, err := processOpts(options...) - if err != nil { - return err - } - - if err := validateType(ty); err != nil { - return err - } - - if err := validateId(id); err != nil { - return err - } - - docId := fmtID(ty, id, opts.Space) - - payload, err := m.idx.Read(ctx, kIndexKibana, docId, bulk.WithRefresh()) - if err != nil { - return err - } - - var tmap map[string]json.RawMessage - if err = json.Unmarshal(payload, &tmap); err != nil { - return err - } - - obj, ok := tmap[ty] - if !ok { - return ErrMalformedSavedObj - } - - return m.decode(ty, id, opts.Space, obj, dst) -} - -// Warning: If you pass encrypted or AAD fields, you broke something. 
Don't do that. -func (m *mgr) Update(ctx context.Context, ty, id string, fields map[string]interface{}, options ...Option) error { - opts, err := processOpts(options...) - if err != nil { - return err - } - - if err := validateType(ty); err != nil { - return err - } - - if err := validateId(id); err != nil { - return err - } - - docId := fmtID(ty, id, opts.Space) - - timeNow := time.Now().UTC().Format(time.RFC3339) - - source, err := json.Marshal(map[string]interface{}{ - "doc": map[string]interface{}{ - ty: fields, - "updated_at": timeNow, - }, - }) - - if err != nil { - return err - } - - bulkOpts := m.makeBulkOpts(opts) - - return m.idx.Update(ctx, kIndexKibana, docId, source, bulkOpts...) -} - -// Warning: If you pass encrypted or AAD fields, you broke something. Don't do that. -func (m *mgr) MUpdate(ctx context.Context, updates []UpdateT, options ...Option) error { - opts, err := processOpts(options...) - if err != nil { - return err - } - - timeNow := time.Now().UTC().Format(time.RFC3339) - - ops := make([]bulk.BulkOp, 0, len(updates)) - - for _, u := range updates { - - if err := validateType(u.Type); err != nil { - return err - } - - if err := validateId(u.Id); err != nil { - return err - } - - docId := fmtID(u.Type, u.Id, opts.Space) - - source, err := json.Marshal(map[string]interface{}{ - "doc": map[string]interface{}{ - u.Type: u.Fields, - "updated_at": timeNow, - }, - }) - - if err != nil { - return err - } - - ops = append(ops, bulk.BulkOp{ - Id: docId, - Body: source, - Index: kIndexKibana, - }) - } - - bulkOpts := m.makeBulkOpts(opts) - - return m.idx.MUpdate(ctx, ops, bulkOpts...) -} - -// Simple term query; does NOT support find on encrypted field. 
-func (m *mgr) FindByField(ctx context.Context, ty string, fields map[string]interface{}) ([]Hit, error) { - - query := NewQuery(ty) - mustNode := query.Query().Bool().Must() - for f, v := range fields { - mustNode.Term(ScopeField(ty, f), v, nil) - } - - return m.FindByNode(ctx, query) -} - -func (m *mgr) FindByNode(ctx context.Context, node *dsl.Node) ([]Hit, error) { - body, err := json.Marshal(node) - if err != nil { - return nil, err - } - - return m.FindRaw(ctx, body) -} - -func (m *mgr) FindRaw(ctx context.Context, body []byte) ([]Hit, error) { - - searcResult, err := m.idx.Search(ctx, []string{kIndexKibana}, body) - - if err != nil { - return nil, err - } - - var hits []Hit - - for _, h := range searcResult.Hits { - - o, err := parseId(h.Id) - if err != nil { - return nil, err - } - - // Decode the source, better way to do this? - var src map[string]json.RawMessage - if err := json.Unmarshal(h.Source, &src); err != nil { - return nil, err - } - - var t string - if err := json.Unmarshal(src["type"], &t); err != nil { - return nil, err - } - - var space string - if v, ok := src["namespace"]; ok { - if err := json.Unmarshal(v, &space); err != nil { - return nil, err - } - } - - if t != o.ty { - return nil, ErrTypeMismatch - } - - if space != o.ns { - return nil, ErrSpaceMismatch - } - - var refs []string - if err := json.Unmarshal(src["references"], &refs); err != nil { - return nil, err - } - - var updatedAt string - if err := json.Unmarshal(src["updated_at"], &updatedAt); err != nil { - return nil, err - } - - hits = append(hits, Hit{ - Id: o.id, - Type: t, - Space: space, - References: refs, - UpdatedAt: updatedAt, - Data: src[t], - }) - - } - - return hits, err -} - -func (m *mgr) Decode(hit Hit, dst interface{}) error { - return m.decode(hit.Type, hit.Id, hit.Space, hit.Data, dst) -} diff --git a/internal/pkg/saved/crypto.go b/internal/pkg/saved/crypto.go deleted file mode 100644 index 948db9ead..000000000 --- a/internal/pkg/saved/crypto.go +++ /dev/null 
@@ -1,180 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/sha512" - "encoding/base64" - "encoding/json" - "golang.org/x/crypto/pbkdf2" -) - -const ( - tagLen = 16 - keyLengthInBytes = 32 - keyIterations = 10000 -) - -func encryptFields(key, aad []byte, fields Fields) error { - - for k, v := range fields { - ciphertext, err := encrypt(key, aad, v) - - if err != nil { - return err - } - fields[k] = ciphertext - } - - return nil -} - -func decryptFields(key, aad []byte, fields Fields) error { - - for k, v := range fields { - ciphertext, ok := v.(string) - if !ok { - return ErrBadCipherText - } - - v, err := decrypt(key, aad, ciphertext) - - if err != nil { - return err - } - fields[k] = v - } - - return nil -} - -// see: https://github.com/elastic/node-crypto/blob/master/src/crypto.ts#L119 -func encrypt(key, aad []byte, v interface{}) (string, error) { - - plaintext, err := json.Marshal(v) - if err != nil { - return "", err - } - - // Generate random data for iv and salt - nonce, err := newNonce() - if err != nil { - return "", err - } - - dk := deriveKey(key, nonce.salt()) - - block, err := aes.NewCipher(dk) - if err != nil { - return "", err - } - - aesgcm, err := cipher.NewGCMWithTagSize(block, tagLen) - if err != nil { - return "", err - } - - ciphertext := aesgcm.Seal(nil, nonce.iv(), plaintext, aad) - - // Expects binary buffer [salt, iv, tag, encrypted] - // goland slaps the tag on the back of the slice, so we have to reorg a bit - tagOffset := len(ciphertext) - tagLen - - buf := bytes.Buffer{} - buf.Grow(ivLen + saltLen + len(ciphertext)) - // Write salt:iv - buf.Write(nonce.both()) - // Write tag - buf.Write(ciphertext[tagOffset:]) - // Write cipher text - 
buf.Write(ciphertext[:tagOffset]) - - payload := base64.StdEncoding.EncodeToString(buf.Bytes()) - return payload, nil -} - -func decrypt(key, aad []byte, cipherText string) (interface{}, error) { - - ciphertext, err := base64.StdEncoding.DecodeString(cipherText) - if err != nil { - return nil, err - } - - // expects header [salt, iv, tag, encrypted] - if len(ciphertext) <= saltLen+ivLen+tagLen { - return nil, ErrBadCipherText - } - - tagOffset := saltLen + ivLen - dataOffset := tagOffset + tagLen - - salt := ciphertext[:saltLen] - iv := ciphertext[saltLen:tagOffset] - tag := ciphertext[tagOffset:dataOffset] - data := ciphertext[dataOffset:] - - dk := deriveKey(key, salt) - - block, err := aes.NewCipher(dk) - if err != nil { - return nil, err - } - - aesgcm, err := cipher.NewGCMWithTagSize(block, tagLen) - if err != nil { - return nil, err - } - - // aesgcm expects the tag to be after the ciphertext - buf := bytes.Buffer{} - buf.Grow(len(data) + len(tag)) - buf.Write(data) - buf.Write(tag) - - plaintext, err := aesgcm.Open(nil, iv, buf.Bytes(), aad) - if err != nil { - return nil, err - } - - // plaintext is raw JSON, decode - var v interface{} - err = json.Unmarshal(plaintext, &v) - return v, err -} - -func deriveKey(key, salt []byte) []byte { - - return pbkdf2.Key( - []byte(key), - salt, - keyIterations, - keyLengthInBytes, - sha512.New, - ) -} - -// Emulate Additional Authenticated Data (AAD) generation in Kibana -// Effectively stable_stringify([ {namespace}, type, id, attributesAAD]); -// -func deriveAAD(ty, id, space string, attrs map[string]interface{}) ([]byte, error) { - /* - if len(attrs) == 0 { - log.Debug().Str("type", ty).Str("id", id).Str("space", space).Msg("No AAD; that seems wrong.") - } - */ - - v := []interface{}{space, ty, id, attrs} - - if space == "" { - v = v[1:] - } - - // This MUST be stable; and 1x1 with what javascript stringify is doing. - // Milage may vary; we may have to implement this manually depending on types and formatting. 
- return json.Marshal(v) -} diff --git a/internal/pkg/saved/encode.go b/internal/pkg/saved/encode.go deleted file mode 100644 index 995a31f8c..000000000 --- a/internal/pkg/saved/encode.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" - "unicode" -) - -const ( - TagSaved = "saved" - TagAad = "aad" - TagEncrypt = "encrypt" - TagJSON = "json" -) - -type tagOptions string - -// From golang JSON code -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// From golang JSON code -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} - -// From golang JSON code -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. 
- case !unicode.IsLetter(c) && !unicode.IsDigit(c): - return false - } - } - return true -} - -// From golang JSON code -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func deriveFieldKey(field reflect.StructField) (string, tagOptions) { - - // Use json tag if available, otherwise lowercase name - tag := field.Tag.Get(TagJSON) - key, opts := parseTag(tag) - - if !isValidTag(key) { - key = strings.ToLower(field.Name) - } - - var out bytes.Buffer - json.HTMLEscape(&out, []byte(key)) - - return out.String(), opts -} - -func gatherAAD(src interface{}) (Fields, Fields) { - t := reflect.TypeOf(src) - v := reflect.ValueOf(src) - - if t.Kind() == reflect.Ptr { - v = v.Elem() - t = reflect.TypeOf(v.Interface()) - } - - aad := make(Fields) - encrypt := make(Fields) - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - - // Get the field tag value - tag := field.Tag.Get(TagSaved) - - switch tag { - case TagAad: - key, _ := deriveFieldKey(field) - aad[key] = v.Field(i).Interface() - case TagEncrypt: - key, _ := deriveFieldKey(field) - encrypt[key] = v.Field(i).Interface() - case "", "-": - default: - panic(fmt.Sprintf("Unknown tag %s:\"%s\"", TagSaved, tag)) - } - } - - return aad, encrypt -} - -func isEncrypted(src interface{}) bool { - t := reflect.TypeOf(src) - - if t.Kind() == reflect.Ptr { - v := reflect.ValueOf(src).Elem().Interface() - t = reflect.TypeOf(v) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - - // Get the field tag value - 
tag := field.Tag.Get(TagSaved) - - switch tag { - case TagEncrypt: - return true - case TagAad, "", "-": - default: - panic(fmt.Sprintf("Unknown tag %s:\"%s\"", TagSaved, tag)) - } - } - - return false -} - -func (m *mgr) encode(ty, id, space string, src interface{}) ([]byte, error) { - if !isEncrypted(src) { - return json.Marshal(src) - } - - // scan for aad - aadSet, encryptSet := gatherAAD(src) - - aad, err := deriveAAD(ty, id, space, aadSet) - if err != nil { - return nil, err - } - - if err := encryptFields([]byte(m.key), aad, encryptSet); err != nil { - return nil, err - } - - fields := NewFields(src) - - for k, v := range encryptSet { - fields[k] = v - } - - return json.Marshal(fields) -} - -func (m *mgr) decode(ty, id, space string, data []byte, dst interface{}) error { - - if err := json.Unmarshal(data, dst); err != nil { - return err - } - - if !isEncrypted(dst) { - return nil - } - - fields := NewFields(dst) - - // scan for aad, this will return empty values, but we need the keys - aadSet, encryptSet := gatherAAD(dst) - - // Fix up aadSet with actual values retrieved from JSON - for k, _ := range aadSet { - aadSet[k] = fields[k] - } - - aad, err := deriveAAD(ty, id, space, aadSet) - if err != nil { - return err - } - - // Fix up encryptSet with actual values retrieved from JSON - for k, _ := range encryptSet { - encryptSet[k] = fields[k] - } - - if err := decryptFields([]byte(m.key), aad, encryptSet); err != nil { - return err - } - - // Overlay encrypted values on fields - for k, v := range encryptSet { - fields[k] = v - } - - return fields.MapInterface(dst) -} diff --git a/internal/pkg/saved/errors.go b/internal/pkg/saved/errors.go deleted file mode 100644 index 2344abd4a..000000000 --- a/internal/pkg/saved/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "errors" -) - -var ( - ErrNoType = errors.New("no type") - ErrRead = errors.New("read error") - ErrNoId = errors.New("no id") - ErrAttributeUnknown = errors.New("unknown attribute") - ErrAttributeType = errors.New("wrong attribute type") - ErrBadCipherText = errors.New("bad cipher text") - ErrNotEncrypted = errors.New("attribute not encrypted") - ErrMalformedSavedObj = errors.New("malformed saved object") - ErrMalformedIdentifier = errors.New("malformed saved object identifier") - ErrTypeMismatch = errors.New("type mismatch") - ErrSpaceMismatch = errors.New("namespace mismatch") -) diff --git a/internal/pkg/saved/fields.go b/internal/pkg/saved/fields.go deleted file mode 100644 index 2177aca3c..000000000 --- a/internal/pkg/saved/fields.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package saved - -import ( - "github.com/mitchellh/mapstructure" - "reflect" -) - -type Fields map[string]interface{} - -func NewFields(src interface{}) Fields { - t := reflect.TypeOf(src) - v := reflect.ValueOf(src) - - if t.Kind() == reflect.Ptr { - v = v.Elem() - t = reflect.TypeOf(v.Interface()) - } - - nFields := v.NumField() - - m := make(Fields, nFields) - - for i := 0; i < nFields; i++ { - key, opts := deriveFieldKey(t.Field(i)) - - if key == "-" || (opts.Contains("omitempty") && isEmptyValue(v.Field(i))) { - continue - } - - m[key] = v.Field(i).Interface() - } - - return m -} - -func (f Fields) MapInterface(dst interface{}) error { - - config := &mapstructure.DecoderConfig{ - TagName: TagJSON, - Result: dst, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(f) -} diff --git a/internal/pkg/saved/id.go b/internal/pkg/saved/id.go deleted file mode 100644 index 5cb78eaa2..000000000 --- a/internal/pkg/saved/id.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "fmt" - "strings" - - "github.com/gofrs/uuid" -) - -func genID(opts optionsT) (string, error) { - var id string - - if opts.Id != "" { - id = opts.Id - } else if u, err := uuid.NewV4(); err != nil { - return "", err - } else { - id = u.String() - } - - return id, nil -} - -func fmtID(ty, id, space string) string { - - if space != "" { - return fmt.Sprintf("%s:%s:%s", space, ty, id) - } - - return fmt.Sprintf("%s:%s", ty, id) -} - -type objectId struct { - id string - ns string - ty string -} - -// Deconstruct the ID. 
Expect namespace:type:id -func parseId(id string) (o objectId, err error) { - - tuple := strings.Split(id, ":") - - switch len(tuple) { - case 1: - o.id = tuple[0] - case 2: - o.ty = tuple[0] - o.id = tuple[1] - case 3: - o.ns = tuple[0] - o.ty = tuple[1] - o.id = tuple[2] - default: - err = ErrMalformedIdentifier - } - - return -} diff --git a/internal/pkg/saved/nonce.go b/internal/pkg/saved/nonce.go deleted file mode 100644 index b22124a3f..000000000 --- a/internal/pkg/saved/nonce.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "crypto/rand" -) - -const ( - ivLen = 12 - saltLen = 64 -) - -type nonceT struct { - buf []byte -} - -func newNonce() (nonceT, error) { - n := nonceT{ - buf: make([]byte, saltLen+ivLen), - } - - _, err := rand.Read(n.buf) - return n, err -} - -func (n nonceT) iv() []byte { - return n.buf[saltLen:] -} - -func (n nonceT) salt() []byte { - return n.buf[:saltLen] -} - -func (n nonceT) both() []byte { - return n.buf -} diff --git a/internal/pkg/saved/opts.go b/internal/pkg/saved/opts.go deleted file mode 100644 index 542c25662..000000000 --- a/internal/pkg/saved/opts.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package saved - -type optionsT struct { - Id string - Space string - Overwrite bool - Flush bool - Refresh bool - References []string -} - -func (c optionsT) Validate() error { - // TODO: validate Space - // TODO: validate Id - // TODO: validate References - return nil -} - -type Option func(*optionsT) - -func WithId(id string) Option { - return func(opt *optionsT) { - opt.Id = id - } -} - -func WithSpace(space string) Option { - return func(opt *optionsT) { - opt.Space = space - } -} - -func WithOverwrite() Option { - return func(opt *optionsT) { - opt.Overwrite = true - } -} - -func WithFlush() Option { - return func(opt *optionsT) { - opt.Flush = true - } -} - -func WithRefresh() Option { - return func(opt *optionsT) { - opt.Refresh = true - } -} - -func WithRefs(refs []string) Option { - return func(opt *optionsT) { - opt.References = refs - } -} - -func processOpts(options ...Option) (opts optionsT, err error) { - for _, optF := range options { - optF(&opts) - } - - err = opts.Validate() - return -} - -func validateType(ty string) error { - // TODO: check for invalidate runes - if ty == "" { - return ErrNoType - } - return nil -} - -func validateId(id string) error { - // TODO: check for invalidate runes - if id == "" { - return ErrNoId - } - return nil -} diff --git a/internal/pkg/saved/query.go b/internal/pkg/saved/query.go deleted file mode 100644 index 2b39a1094..000000000 --- a/internal/pkg/saved/query.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package saved - -import ( - "fmt" - - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" -) - -func NewQuery(ty string) *dsl.Node { - - root := dsl.NewRoot() - - // Require the type - root.Query().Bool().Must().Term("type", ty, nil) - - return root -} - -func ScopeField(ty, field string) string { - return fmt.Sprintf("%s.%s", ty, field) -} - -type ScopeFuncT func(field string) string - -func ScopeFunc(ty string) ScopeFuncT { - prefix := fmt.Sprintf("%s.", ty) - return func(field string) string { - return prefix + field - } -} - -/* - -1) saved.SearchNode(ctx, dsl.Node) -2) saved.SearchRaw(ctx, []byte) -3) fix policy to support N looksup in parallel -4) multisearch? how return hits? -5) strip out comments... -6) templatize call to get agent id at beginning of program - - - - q.Field(scopedField, value, boost) - -type treeMap map[string]*QueryN -type QueryN struct { - leaf interface{} - tree treeMap - array []*QueryN -} - - -func (q *QueryN) MarshalJSON() ([]byte, error) { - - switch { - case q.leaf != nil: - return json.Marshal(q.leaf) - case q.tree != nil: - return json.Marshal(q.tree) - case q.array != nil: - return json.Marshal(q.array) - } - - return []byte("null"), nil -} - -func (q *QueryN) Query() *QueryN { - if node, ok := q.tree["query"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{} - q.tree["query"] = node - return node -} - -func (q *QueryN) Bool() *QueryN { - if node, ok := q.tree["bool"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{} - q.tree["bool"] = node - return node -} - -func (q *QueryN) Must() *QueryN { - if node, ok := q.tree["must"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{ - array: make([]*QueryN, 0), - } - q.tree["must"] = node - return node -} - -func (q *QueryN) Term() *QueryN { - return q.makeChildNode("term") -} - -func (q *QueryN) 
makeChildNode(key string) *QueryN { - node := &QueryN{} - if q.array != nil { - tNode := QueryN{ - tree: map[string]*QueryN{key:node}, - } - q.array = append(q.array, &tNode) - - } else { - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - q.tree[key] = node - } - - return node -} - -func (q *QueryN) Field(field string, value interface{}, boost *float64) { - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - var leaf interface{} - - switch boost { - case nil: - leaf = value - default: - leaf = &struct { - Value interface{} `json:"value"` - Boost *float64 `json:"boost,omitempty"` - } { - value, - boost, - } - } - - node := &QueryN{ - leaf: leaf, - } - - q.tree[field] = node -} - -func (q *QueryN) SavedField(ty, field string, value interface{}, boost *float64) { - scopedField := fmt.Sprintf("%s.%s", ty, field) - q.Field(scopedField, value, boost) -} - -type RangeOpt func(treeMap) - -func WithRangeGT(v interface{}) RangeOpt { - return func(tmap treeMap) { - tmap["gt"] = &QueryN{leaf:v} - } -} - -func (q *QueryN) Range(field string, opts ...RangeOpt) { - - fieldNode := &QueryN{ - tree: make(treeMap), - } - - for _, o := range opts { - o(fieldNode.tree) - } - - node := q.makeChildNode("range") - node.tree = map[string]*QueryN{ - field: fieldNode, - } -} - -func (q *QueryN) Size(sz uint64) { - if q.tree == nil { - q.tree = make(treeMap) - } - q.tree["size"] = &QueryN { - leaf: sz, - } -} - -func (q *QueryN) Sort() *QueryN { - n := q.makeChildNode("sort") - n.array = make([]*QueryN, 0) - return n -} - -type SortOrderT string - -const ( - SortAscend SortOrderT = "asc" - SortDescend = "desc" -) - -func (q *QueryN) SortOrder(field string, order SortOrderT) { - if q.array == nil { - panic("Parent should be sort node") - } - - defaultOrder := SortAscend - if field == "_score" { - defaultOrder = SortDescend - } - - if order == defaultOrder { - q.array = append(q.array, &QueryN{leaf:field}) - } else { - n := q.makeChildNode(field) - n.leaf = order - } -} - 
- -func (q *QueryN) SortOpt(field string, order SortOrder, opts ...SortOpt) { - // TODO -} -*/ diff --git a/internal/pkg/smap/smap.go b/internal/pkg/smap/smap.go new file mode 100644 index 000000000..3c636dffd --- /dev/null +++ b/internal/pkg/smap/smap.go @@ -0,0 +1,74 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package smap + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" +) + +type Map map[string]interface{} + +func (m Map) GetMap(k string) Map { + if m == nil { + return m + } + + v := m[k] + if v != nil { + if m, ok := v.(map[string]interface{}); ok { + return m + } + } + return nil +} + +func (m Map) GetString(k string) string { + if m == nil { + return "" + } + if v := m[k]; v != nil { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func (m Map) Hash() (string, error) { + if m == nil { + return "", nil + } + + // Hashing through the json encoder + h := sha256.New() + enc := json.NewEncoder(h) + err := enc.Encode(m) + if err != nil { + return "", err + } + + return hex.EncodeToString(h.Sum(nil)), nil +} + +func (m Map) Marshal() ([]byte, error) { + if m == nil { + return nil, nil + } + return json.Marshal(m) +} + +func Parse(data []byte) (Map, error) { + if len(data) == 0 { + return nil, nil + } + + var m Map + + err := json.Unmarshal(data, &m) + + return m, err +} diff --git a/internal/pkg/sqn/sqn.go b/internal/pkg/sqn/sqn.go new file mode 100644 index 000000000..d9832fe4f --- /dev/null +++ b/internal/pkg/sqn/sqn.go @@ -0,0 +1,46 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package sqn + +import ( + "fmt" + "strings" +) + +const UndefinedSeqNo = -1 + +var DefaultSeqNo = []int64{UndefinedSeqNo} + +// Abstracts the array of document seq numbers +type SeqNo []int64 + +func (s SeqNo) String() string { + if len(s) == 0 { + return "" + } + return strings.Join(strings.Fields(strings.Trim(fmt.Sprint([]int64(s)), "[]")), ",") +} + +func (s SeqNo) IsSet() bool { + return len(s) > 0 && s[0] >= 0 +} + +// Returns one/first value until we get and API to get the next checkpoints on search +func (s SeqNo) Value() int64 { + if len(s) == 0 { + return UndefinedSeqNo + } + return s[0] +} + +func (s SeqNo) Clone() SeqNo { + if s == nil { + return nil + } + + r := make(SeqNo, len(s)) + copy(r, s) + return r +} diff --git a/internal/pkg/status/reporter.go b/internal/pkg/status/reporter.go new file mode 100644 index 000000000..d3f44fdad --- /dev/null +++ b/internal/pkg/status/reporter.go @@ -0,0 +1,51 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package status + +import ( + "github.com/rs/zerolog/log" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" +) + +// Reporter is interface that reports updated status on. +type Reporter interface { + // Status triggers updating the status. + Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error +} + +// Log logs the reported status. +type Log struct{} + +// NewLog creates a LogStatus. +func NewLog() *Log { + return &Log{} +} + +// Status triggers updating the status. +func (l *Log) Status(status proto.StateObserved_Status, message string, _ map[string]interface{}) error { + log.Info().Str("status", status.String()).Msg(message) + return nil +} + +// Chained calls Status on all the provided reporters in the provided order. 
+type Chained struct { + reporters []Reporter +} + +// NewChained creates a Chained with provided reporters. +func NewChained(reporters ...Reporter) *Chained { + return &Chained{reporters} +} + +// Status triggers updating the status. +func (l *Chained) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error { + for _, reporter := range l.reporters { + if err := reporter.Status(status, message, payload); err != nil { + return err + } + } + return nil +} diff --git a/internal/pkg/testing/actions.go b/internal/pkg/testing/actions.go index 2bea9a4dc..95815b444 100644 --- a/internal/pkg/testing/actions.go +++ b/internal/pkg/testing/actions.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package testing @@ -9,12 +10,13 @@ package testing import ( "context" "encoding/json" + "testing" + "time" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/rnd" - "testing" - "time" "github.com/gofrs/uuid" "github.com/rs/xid" @@ -60,7 +62,7 @@ func CreateRandomActions(min, max int) ([]model.Action, error) { Timestamp: r.Time(now, 2, 5, time.Second, rnd.TimeBefore).Format(time.RFC3339), Expiration: r.Time(now, 12, 25, time.Minute, rnd.TimeAfter).Format(time.RFC3339), Type: "APP_ACTION", - InputId: "osquery", + InputType: "osquery", Agents: aid, Data: data, } diff --git a/internal/pkg/testing/bulk.go b/internal/pkg/testing/bulk.go index 1b8fb12fe..c39bddded 100644 --- a/internal/pkg/testing/bulk.go +++ b/internal/pkg/testing/bulk.go @@ -7,7 +7,7 @@ package testing import ( "context" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/gofrs/uuid" 
"github.com/elastic/fleet-server/v7/internal/pkg/bulk" @@ -42,11 +42,27 @@ func (m MockBulk) Read(ctx context.Context, index, id string, opts ...bulk.Opt) return nil, nil } -func (m MockBulk) MUpdate(ctx context.Context, ops []bulk.BulkOp, opts ...bulk.Opt) error { +func (m MockBulk) Delete(ctx context.Context, index, id string, opts ...bulk.Opt) error { return nil } -func (m MockBulk) Search(ctx context.Context, index []string, body []byte, opts ...bulk.Opt) (*es.ResultT, error) { +func (m MockBulk) MCreate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MIndex(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MUpdate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MDelete(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) Search(ctx context.Context, index string, body []byte, opts ...bulk.Opt) (*es.ResultT, error) { return &es.ResultT{}, nil } @@ -54,4 +70,20 @@ func (m MockBulk) Client() *elasticsearch.Client { return nil } +func (m MockBulk) ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*bulk.ApiKey, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyRead(ctx context.Context, id string) (*bulk.ApiKeyMetadata, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyAuth(ctx context.Context, key bulk.ApiKey) (*bulk.SecurityInfo, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyInvalidate(ctx context.Context, ids ...string) error { + return nil +} + var _ bulk.Bulk = (*MockBulk)(nil) diff --git a/internal/pkg/esboot/README.MD b/internal/pkg/testing/esutil/README.MD similarity index 100% rename from internal/pkg/esboot/README.MD rename to 
internal/pkg/testing/esutil/README.MD diff --git a/internal/pkg/testing/esutil/bootstrap.go b/internal/pkg/testing/esutil/bootstrap.go new file mode 100644 index 000000000..e2aafce76 --- /dev/null +++ b/internal/pkg/testing/esutil/bootstrap.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package esutil + +import ( + "context" + + "github.com/elastic/go-elasticsearch/v7" +) + +// EnsureIndex sets up the index if it doesn't exists, utilized for integration tests at the moment +func EnsureIndex(ctx context.Context, cli *elasticsearch.Client, name, mapping string) error { + err := EnsureTemplate(ctx, cli, name, mapping, false) + if err != nil { + return err + } + return CreateIndex(ctx, cli, name) +} diff --git a/internal/pkg/esboot/datastream.go b/internal/pkg/testing/esutil/datastream.go similarity index 94% rename from internal/pkg/esboot/datastream.go rename to internal/pkg/testing/esutil/datastream.go index da518aaab..990288921 100644 --- a/internal/pkg/esboot/datastream.go +++ b/internal/pkg/testing/esutil/datastream.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package esboot +package esutil import ( "context" @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/esboot/esutil.go b/internal/pkg/testing/esutil/esutil.go similarity index 97% rename from internal/pkg/esboot/esutil.go rename to internal/pkg/testing/esutil/esutil.go index a6c251e7d..97ba8c715 100644 --- a/internal/pkg/esboot/esutil.go +++ b/internal/pkg/testing/esutil/esutil.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "encoding/json" @@ -12,7 +12,7 @@ import ( "net/http" "strings" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/esboot/ilm.go b/internal/pkg/testing/esutil/ilm.go similarity index 99% rename from internal/pkg/esboot/ilm.go rename to internal/pkg/testing/esutil/ilm.go index 34256c3e5..00f92e08c 100644 --- a/internal/pkg/esboot/ilm.go +++ b/internal/pkg/testing/esutil/ilm.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package esboot +package esutil import ( "context" @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/esboot/index.go b/internal/pkg/testing/esutil/index.go similarity index 61% rename from internal/pkg/esboot/index.go rename to internal/pkg/testing/esutil/index.go index a6ebcd877..1cb7eff53 100644 --- a/internal/pkg/esboot/index.go +++ b/internal/pkg/testing/esutil/index.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "context" @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) @@ -45,3 +45,31 @@ func CreateIndex(ctx context.Context, cli *elasticsearch.Client, name string) er return nil } + +func DeleteIndices(ctx context.Context, cli *elasticsearch.Client, names ...string) error { + res, err := cli.Indices.Delete(names, + cli.Indices.Delete.WithContext(ctx), + ) + + if err != nil { + return err + } + + defer res.Body.Close() + + err = checkResponseError(res) + if err != nil { + return err + } + + var r AckResponse + err = json.NewDecoder(res.Body).Decode(&r) + if err != nil { + return fmt.Errorf("failed to parse delete indices response: %v, err: %v", names, err) + } + if !r.Acknowledged { + return fmt.Errorf("failed to receive acknowledgment for delete indices request: %v", names) + } + + return nil +} diff --git a/internal/pkg/esboot/strmap.go b/internal/pkg/testing/esutil/strmap.go similarity index 97% rename from internal/pkg/esboot/strmap.go rename to internal/pkg/testing/esutil/strmap.go index 9794655aa..3e0950ff1 100644 --- a/internal/pkg/esboot/strmap.go +++ b/internal/pkg/testing/esutil/strmap.go @@ -2,7 
+2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil type stringMap map[string]interface{} diff --git a/internal/pkg/esboot/template.go b/internal/pkg/testing/esutil/template.go similarity index 98% rename from internal/pkg/esboot/template.go rename to internal/pkg/testing/esutil/template.go index bbea76279..150b50c0e 100644 --- a/internal/pkg/esboot/template.go +++ b/internal/pkg/testing/esutil/template.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "context" @@ -12,7 +12,7 @@ import ( "net/http" "strings" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/testing/retry.go b/internal/pkg/testing/retry.go index 4814aa914..c02b29128 100644 --- a/internal/pkg/testing/retry.go +++ b/internal/pkg/testing/retry.go @@ -52,9 +52,7 @@ func Retry(t *testing.T, ctx context.Context, f RetryFunc, opts ...RetryOption) if err == nil { return } - if err = sleep.WithContext(ctx, o.sleep); err != nil { - break - } + sleep.WithContext(ctx, o.sleep) } t.Fatal(err) } diff --git a/internal/pkg/testing/setup.go b/internal/pkg/testing/setup.go index 84a30b5c7..dc692262f 100644 --- a/internal/pkg/testing/setup.go +++ b/internal/pkg/testing/setup.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration // +build integration package testing @@ -10,12 +11,14 @@ import ( "context" "testing" + "github.com/elastic/go-elasticsearch/v7" "github.com/elastic/go-ucfg/yaml" "github.com/rs/xid" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" ) var defaultCfg config.Config @@ -41,19 +44,31 @@ func init() { } } -func SetupBulk(ctx context.Context, t *testing.T, opts ...bulk.BulkOpt) bulk.Bulk { +func SetupES(ctx context.Context, t *testing.T) *elasticsearch.Client { t.Helper() - _, bulker, err := bulk.InitES(ctx, &defaultCfg, opts...) + + cli, err := es.NewClient(ctx, &defaultCfg, false) if err != nil { t.Fatal(err) } + + return cli +} + +func SetupBulk(ctx context.Context, t *testing.T, opts ...bulk.BulkOpt) bulk.Bulk { + t.Helper() + + cli := SetupES(ctx, t) + opts = append(opts, bulk.BulkOptsFromCfg(&defaultCfg)...) + bulker := bulk.NewBulker(cli, opts...) + go bulker.Run(ctx) return bulker } func SetupIndex(ctx context.Context, t *testing.T, bulker bulk.Bulk, mapping string) string { t.Helper() index := xid.New().String() - err := esboot.EnsureIndex(ctx, bulker.Client(), index, mapping) + err := esutil.EnsureIndex(ctx, bulker.Client(), index, mapping) if err != nil { t.Fatal(err) } diff --git a/internal/pkg/testing/suite/suite.go b/internal/pkg/testing/suite/suite.go new file mode 100644 index 000000000..5ddd787a6 --- /dev/null +++ b/internal/pkg/testing/suite/suite.go @@ -0,0 +1,56 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build integration + +package suite + +import ( + "context" + + "github.com/stretchr/testify/require" + tsuite "github.com/stretchr/testify/suite" + + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" +) + +var prepareIndexes = map[string]string{ + dl.FleetActions: es.MappingAction, + dl.FleetActionsResults: es.MappingActionResult, + dl.FleetAgents: es.MappingAgent, + dl.FleetArtifacts: es.MappingArtifact, + dl.FleetEnrollmentAPIKeys: es.MappingEnrollmentApiKey, + dl.FleetPolicies: es.MappingPolicy, + dl.FleetPoliciesLeader: es.MappingPolicyLeader, + dl.FleetServers: es.MappingServer, +} + +type RunningSuite struct { + tsuite.Suite +} + +func (s *RunningSuite) SetupSuite() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := ftesting.SetupES(ctx, s.T()) + for index, mapping := range prepareIndexes { + err := esutil.EnsureIndex(ctx, c, index, mapping) + require.NoError(s.T(), err) + } +} + +func (s *RunningSuite) TearDownSuite() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := ftesting.SetupES(ctx, s.T()) + names := make([]string, 0, len(prepareIndexes)) + for index, _ := range prepareIndexes { + names = append(names, index) + } + err := esutil.DeleteIndices(ctx, c, names...) + require.NoError(s.T(), err) +} diff --git a/internal/pkg/throttle/throttle.go b/internal/pkg/throttle/throttle.go new file mode 100644 index 000000000..b58ef8470 --- /dev/null +++ b/internal/pkg/throttle/throttle.go @@ -0,0 +1,154 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package throttle
+
+import (
+	"github.com/rs/zerolog/log"
+	"sync"
+	"time"
+)
+
+type Token struct {
+	id       uint64
+	key      string
+	throttle *Throttle
+}
+
+type tstate struct {
+	id     uint64
+	expire time.Time
+}
+
+type Throttle struct {
+	mut         sync.Mutex
+	maxParallel int
+	tokenCnt    uint64
+	tokenMap    map[string]tstate
+}
+
+// Throttle provides two controls:
+// 1) Only one Token per key at a time can be acquired. Token expires if not released by ttl.
+// 2) Only max unexpired tokens acquired at any one time.
+
+func NewThrottle(max int) *Throttle {
+	return &Throttle{
+		maxParallel: max,
+		tokenMap:    make(map[string]tstate),
+	}
+}
+
+func (tt *Throttle) Acquire(key string, ttl time.Duration) *Token {
+	var token *Token
+
+	tt.mut.Lock()
+	defer tt.mut.Unlock()
+
+	if tt.checkAtMaxPending(key) {
+		log.Trace().
+			Str("key", key).
+			Int("max", tt.maxParallel).
+			Int("szMap", len(tt.tokenMap)).
+			Msg("Throttle fail acquire on max pending")
+		return nil
+	}
+
+	// Is there already a pending request on this key?
+	state, ok := tt.tokenMap[key]
+
+	// If there's nothing pending on 'key', or the previous token timed out, create a token
+
+	now := time.Now()
+	if !ok || state.expire.Before(now) {
+		tt.tokenCnt += 1
+
+		token = &Token{
+			id:       tt.tokenCnt,
+			key:      key,
+			throttle: tt,
+		}
+
+		state := tstate{
+			id:     token.id,
+			expire: now.Add(ttl),
+		}
+
+		tt.tokenMap[key] = state
+
+		log.Trace().
+			Str("key", key).
+			Uint64("token", token.id).
+			Time("expire", state.expire).
+			Msg("Throttle acquired")
+
+		return token
+	}
+
+	log.Trace().
+		Str("key", key).
+		Msg("Throttle fail acquire on existing token")
+
+	return token
+}
+
+// WARNING: Assumes mutex already held
+func (tt *Throttle) checkAtMaxPending(key string) bool {
+
+	// Are we already at max parallel?
+ if tt.maxParallel == 0 || len(tt.tokenMap) < tt.maxParallel { + return false + } + + now := time.Now() + + // Try to eject the target key first + if state, ok := tt.tokenMap[key]; ok && state.expire.Before(now) { + delete(tt.tokenMap, key) + log.Trace(). + Str("key", key). + Msg("Ejected target token on expiration") + + return false + } + + // Scan through map looking for something to expire. + // Not very efficient, O(N), but perhaps not worth optimizing + var found bool + for skey, state := range tt.tokenMap { + if state.expire.Before(now) { + found = true + delete(tt.tokenMap, skey) + log.Trace(). + Str("key", key). + Msg("Ejected token on expiration") + break + } + } + + return !found +} + +func (tt *Throttle) release(id uint64, key string) bool { + + tt.mut.Lock() + defer tt.mut.Unlock() + + state, ok := tt.tokenMap[key] + if !ok { + log.Trace().Uint64("id", id).Str("key", key).Msg("Token not found to release") + return false + } + + if state.id == id { + log.Trace().Uint64("id", id).Str("key", key).Msg("Token released") + delete(tt.tokenMap, key) + return true + } + + return false +} + +func (t Token) Release() bool { + return t.throttle.release(t.id, t.key) +} diff --git a/internal/pkg/throttle/throttle_test.go b/internal/pkg/throttle/throttle_test.go new file mode 100644 index 000000000..91b2d9412 --- /dev/null +++ b/internal/pkg/throttle/throttle_test.go @@ -0,0 +1,235 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package throttle
+
+import (
+	"github.com/rs/zerolog"
+	"math/rand"
+	"strconv"
+	"testing"
+	"time"
+)
+
+func disableTraceLogging() func() {
+	lvl := zerolog.GlobalLevel()
+	zerolog.SetGlobalLevel(zerolog.InfoLevel)
+	return func() {
+		zerolog.SetGlobalLevel(lvl)
+	}
+}
+
+func TestThrottleZero(t *testing.T) {
+	f := disableTraceLogging()
+	defer f()
+
+	// Zero max parallel means we can acquire as many as we want,
+	// but still cannot acquire existing that has not timed out
+	throttle := NewThrottle(0)
+
+	N := rand.Intn(64) + 10
+
+	var tokens []*Token
+	for i := 0; i < N; i++ {
+
+		key := strconv.Itoa(i)
+
+		// Acquire token for key with long timeout so doesn't trip unit test
+		token1 := throttle.Acquire(key, time.Hour)
+		if token1 == nil {
+			t.Fatal("Acquire failed")
+		}
+		tokens = append(tokens, token1)
+
+		// Second acquire should fail because we have not released the original token,
+		// or possibly if i == N-1 we could max parallel
+		token2 := throttle.Acquire(key, time.Hour)
+		if token2 != nil {
+			t.Error("Expected second acquire to fail on conflict")
+		}
+	}
+
+	// Validate again that all tokens are blocked after allocating N
+	for i := 0; i < N; i++ {
+
+		key := strconv.Itoa(i)
+
+		// Acquire should fail because we have not released the original token,
+		token := throttle.Acquire(key, time.Hour)
+		if token != nil {
+			t.Error("Expected acquire to fail on conflict")
+		}
+	}
+
+	for i, token := range tokens {
+
+		found := token.Release()
+		if !found {
+			t.Error("Expect token to be found")
+		}
+
+		// Second release should return false
+		found = token.Release()
+		if found {
+			t.Error("Expect token to not found on second release")
+		}
+
+		// We should now be able to acquire
+		key := strconv.Itoa(i)
+
+		token = throttle.Acquire(key, time.Hour)
+		if token == nil {
+			t.Fatal("Acquire failed")
+		}
+
+		found = token.Release()
+		if !found {
+			t.Error("Expect token to be found")
+		}
+	}
+}
+
+func TestThrottleN(t *testing.T) {
+	f := disableTraceLogging()
+	defer f()
+
+	for N := 1; N < 11; N++ {
+
+		throttle := NewThrottle(N)
+
+		var tokens []*Token
+		for i := 0; i < N; i++ {
+
+			key := strconv.Itoa(i)
+
+			// Acquire token for key with long timeout so doesn't trip unit test
+			token1 := throttle.Acquire(key, time.Hour)
+			if token1 == nil {
+				t.Fatal("Acquire failed")
+			}
+			tokens = append(tokens, token1)
+
+			// Second acquire should fail because we have not released the original token,
+			// or possibly if i == N-1 we could max parallel
+			token2 := throttle.Acquire(key, time.Hour)
+			if token2 != nil {
+				t.Error("Expected second acquire to fail on conflict")
+			}
+		}
+
+		// Any subsequent request should fail because at max
+		try := rand.Intn(64) + 1
+		for i := 0; i < try; i++ {
+
+			key := strconv.Itoa(N + i)
+
+			token1 := throttle.Acquire(key, time.Hour)
+			if token1 != nil {
+				t.Fatal("Expect acquire to fail on max tokens")
+			}
+		}
+
+		// Release one at a time, validate that we can reacquire
+		for i, token := range tokens {
+
+			found := token.Release()
+			if !found {
+				t.Error("Expect token to be found")
+			}
+
+			// Second release should return false
+			found = token.Release()
+			if found {
+				t.Error("Expect token to not found on second release")
+			}
+
+			// We should now be able to acquire
+			key := strconv.Itoa(i)
+
+			token = throttle.Acquire(key, time.Hour)
+			if token == nil {
+				t.Fatal("Acquire failed")
+			}
+
+			found = token.Release()
+			if !found {
+				t.Error("Expect token to be found")
+			}
+		}
+	}
+}
+
+func TestThrottleExpireIdentity(t *testing.T) {
+	f := disableTraceLogging()
+	defer f()
+
+	throttle := NewThrottle(1)
+
+	key := "xxx"
+	token := throttle.Acquire(key, time.Second)
+
+	// Should *NOT* be able to re-acquire until TTL
+	token2 := throttle.Acquire(key, time.Hour)
+	if token2 != nil {
+		t.Error("Expected second acquire to fail on conflict")
+	}
+
+	time.Sleep(time.Second)
+
+	// Should be able to re-acquire on expiration
+	token3 := throttle.Acquire(key, time.Hour)
+	if token3 == nil {
+		t.Error("Expected third aquire to succeed")
+	}
+
+	// Original token should fail release
+	found := token.Release()
+	if found {
+		t.Error("Expected token to have expired")
+	}
+
+	// However, third token should release fine
+	found = token3.Release()
+	if !found {
+		t.Error("Expect recently acquired token to release cleanly")
+	}
+}
+
+// Test that a token from a different key is expired when at max
+func TestThrottleExpireAtMax(t *testing.T) {
+	f := disableTraceLogging()
+	defer f()
+
+	throttle := NewThrottle(1)
+
+	key1 := "xxx"
+	token1 := throttle.Acquire(key1, time.Second)
+
+	// Should be at max, cannot acquire different key
+	key2 := "yyy"
+	token2 := throttle.Acquire(key2, time.Hour)
+	if token2 != nil {
+		t.Error("Expected second acquire to fail on max")
+	}
+
+	time.Sleep(time.Second)
+
+	// Should be able to acquire second after timeout
+	token2 = throttle.Acquire(key2, time.Hour)
+	if token2 == nil {
+		t.Error("Expected third aquire to succeed")
+	}
+
+	// Original token should fail release
+	found := token1.Release()
+	if found {
+		t.Error("Expected token to have expired")
+	}
+
+	// However, third token should release fine
+	found = token2.Release()
+	if !found {
+		t.Error("Expect recently acquired token2 to release cleanly")
+	}
+}
diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go
new file mode 100644
index 000000000..e504bf727
--- /dev/null
+++ b/internal/pkg/ver/check.go
@@ -0,0 +1,91 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+ +package ver + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + esh "github.com/elastic/fleet-server/v7/internal/pkg/es" + + "github.com/elastic/go-elasticsearch/v7" + "github.com/hashicorp/go-version" + "github.com/rs/zerolog/log" +) + +var ( + ErrUnsupportedVersion = errors.New("unsupported version") + ErrMalformedVersion = errors.New("malformed version") +) + +func CheckCompatibility(ctx context.Context, esCli *elasticsearch.Client, fleetVersion string) error { + log.Debug().Str("fleet_version", fleetVersion).Msg("check version compatibility with elasticsearch") + + esVersion, err := esh.FetchESVersion(ctx, esCli) + + if err != nil { + log.Error().Err(err).Msg("failed to fetch elasticsearch version") + return err + } + log.Debug().Str("elasticsearch_version", esVersion).Msg("fetched elasticsearch version") + + return checkCompatibility(fleetVersion, esVersion) +} + +func checkCompatibility(fleetVersion, esVersion string) error { + verConst, err := buildVersionConstraint(fleetVersion) + if err != nil { + log.Error().Err(err).Str("fleet_version", fleetVersion).Msg("failed to build constraint") + return err + } + + ver, err := parseVersion(esVersion) + if err != nil { + return err + } + + if !verConst.Check(ver) { + log.Error(). + Err(ErrUnsupportedVersion). + Str("constraint", verConst.String()). + Str("reported", ver.String()). 
+ Msg("failed elasticsearch version check") + return ErrUnsupportedVersion + } + log.Info().Str("fleet_version", fleetVersion).Str("elasticsearch_version", esVersion).Msg("Elasticsearch compatibility check successful") + return nil +} + +func buildVersionConstraint(fleetVersion string) (version.Constraints, error) { + ver, err := parseVersion(fleetVersion) + if err != nil { + return nil, err + } + return version.NewConstraint(fmt.Sprintf(">= %s", minimizePatch(ver))) +} + +func minimizePatch(ver *version.Version) string { + segments := ver.Segments() + if len(segments) > 2 { + segments = segments[:2] + } + segments = append(segments, 0) + segStrs := make([]string, 0, len(segments)) + for _, segment := range segments { + segStrs = append(segStrs, strconv.Itoa(segment)) + } + return strings.Join(segStrs, ".") +} + +func parseVersion(sver string) (*version.Version, error) { + ver, err := version.NewVersion(strings.Split(sver, "-")[0]) + if err != nil { + return nil, fmt.Errorf("%v: %w", err, ErrMalformedVersion) + } + return ver, nil +} diff --git a/internal/pkg/ver/check_test.go b/internal/pkg/ver/check_test.go new file mode 100644 index 000000000..0a95c7477 --- /dev/null +++ b/internal/pkg/ver/check_test.go @@ -0,0 +1,104 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package ver + +import ( + "errors" + "testing" +) + +func TestCheckCompatibilityInternal(t *testing.T) { + tests := []struct { + name string + fleetVersion string + esVersion string + err error + }{ + { + name: "empty fleet and elasticsearch version", + fleetVersion: "", + esVersion: "", + err: ErrMalformedVersion, + }, + { + name: "empty fleet version", + fleetVersion: "", + esVersion: "8.0.0", + err: ErrMalformedVersion, + }, + { + name: "empty elasticsearch version", + fleetVersion: "7.13", + esVersion: "", + err: ErrMalformedVersion, + }, + { + name: "supported elasticsearch 713-713", + fleetVersion: "7.13.0", + esVersion: "7.13.0", + err: nil, + }, + { + name: "supported elasticsearch 7131-7132", + fleetVersion: "7.13.2", + esVersion: "7.13.1", + err: nil, + }, + { + name: "supported elasticsearch 713-714", + fleetVersion: "7.13.2", + esVersion: "7.14.2", + err: nil, + }, + { + name: "supported elasticsearch 715-800", + fleetVersion: "7.15.2", + esVersion: "8.0.0", + err: nil, + }, + { + name: "unsupported elasticsearch 714-713", + fleetVersion: "7.14.0", + esVersion: "7.13.1", + err: ErrUnsupportedVersion, + }, + { + name: "unsupported elasticsearch 800-718", + fleetVersion: "8.0.0", + esVersion: "7.18.0", + err: ErrUnsupportedVersion, + }, + { + name: "supported elasticsearch 800a1", + fleetVersion: "8.0.0-alpha1", + esVersion: "8.0.0-alpha1", + err: nil, + }, + { + name: "supported elasticsearch 715-800a1", + fleetVersion: "7.15.2", + esVersion: "8.0.0-alpha1", + err: nil, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := checkCompatibility(tc.fleetVersion, tc.esVersion) + if tc.err != nil { + if err == nil { + t.Error("expected error") + } else { + if !errors.Is(err, tc.err) { + t.Errorf("unexpected error kind: %v", err) + } + } + } else { + if err != nil { + t.Error("unexpected error") + } + } + }) + } +} diff --git a/main.go b/main.go index b1aec97ea..d2451366a 100644 --- a/main.go +++ b/main.go @@ -13,20 +13,24 @@ 
import ( "fmt" "os" - // Needed for the generator not to be nuked by go tidy. Fails make check otherwise. - _ "github.com/aleksmaus/generate" - "github.com/elastic/fleet-server/v7/cmd/fleet" + "github.com/elastic/fleet-server/v7/internal/pkg/build" ) -const defaultVersion = "8.0.0" +const defaultVersion = "7.16.1" var ( - Version string = defaultVersion + Version string = defaultVersion + Commit string + BuildTime string ) func main() { - cmd := fleet.NewCommand(Version) + cmd := fleet.NewCommand(build.Info{ + Version: Version, + Commit: Commit, + BuildTime: build.Time(BuildTime), + }) if err := cmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) diff --git a/model/schema.json b/model/schema.json index 686c23124..1af57c407 100644 --- a/model/schema.json +++ b/model/schema.json @@ -1,6 +1,7 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "action": { "title": "Agent action", "description": "An Elastic Agent action", @@ -30,11 +31,19 @@ "format": "date-time" }, "type": { - "description": "The action type. APP_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.", + "description": "The action type. 
INPUT_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.", + "type": "string" + }, + "input_type": { + "description": "The input type the actions should be routed to.", "type": "string" }, - "input_id": { - "description": "The input identifier the actions should be routed to.", + "timeout": { + "description": "The optional action timeout in seconds", + "type": "integer" + }, + "user_id": { + "description": "The ID of the user who created the action.", "type": "string" }, "agents": { @@ -73,6 +82,26 @@ "description": "The action id.", "type": "string" }, + "started_at": { + "description": "Date/time the action was started", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "description": "Date/time the action was completed", + "type": "string", + "format": "date-time" + }, + "action_data": { + "description": "The opaque payload.", + "type": "object", + "format": "raw" + }, + "action_response": { + "description": "The custom action response payload.", + "type": "object", + "format": "raw" + }, "error": { "description": "The action error message.", "type": "string" @@ -110,6 +139,67 @@ "version" ] }, + + "artifact": { + "title": "Artifact", + "description": "An artifact served by Fleet", + "type": "object", + "properties": { + "identifier": { + "description": "Human readable artifact identifier", + "type": "string" + }, + "compression_algorithm": { + "description": "Name of compression algorithm applied to artifact", + "type": "string" + }, + "encryption_algorithm": { + "description": "Name of encryption algorithm applied to artifact", + "type": "string" + }, + "encoded_sha256": { + "description": "SHA256 of artifact after encoding has been applied", + "type": "string" + }, + "encoded_size": { + "description": "Size of artifact after encoding has been applied", + "type": "integer" + }, + "decoded_sha256": { + "description": "SHA256 of artifact before encoding has been applied", + "type": "string" + }, + 
"decoded_size": { + "description": "Size of artifact before encoding has been applied", + "type": "integer" + }, + "created": { + "description": "Timestamp artifact was created", + "type": "string", + "format": "date-time" + }, + "body": { + "description": "Encoded artifact data", + "type": "object", + "format": "raw" + }, + "package_name": { + "description": "Name of the package that owns this artifact", + "type": "string" + } + }, + "required": [ + "identifier", + "compressionAlgorithm", + "encodedSha256", + "encodedSize", + "decodedSha256", + "decodedSize", + "created", + "body" + ] + }, + "host-metadata": { "title": "Host Metadata", "description": "The host metadata for the Elastic Agent", @@ -212,6 +302,10 @@ "default_fleet_server": { "description": "True when this policy is the default policy to start Fleet Server", "type": "boolean" + }, + "unenroll_timeout": { + "description": "Timeout (seconds) that an Elastic Agent should be un-enrolled.", + "type": "integer" } }, "required": [ @@ -274,6 +368,11 @@ "type": "string", "format": "date-time" }, + "unenrolled_reason": { + "description": "Reason the Elastic Agent was unenrolled", + "type": "string", + "enum": ["manual", "timeout"] + }, "unenrollment_started_at": { "description": "Date/time the Elastic Agent unenrolled started", "type": "string", @@ -317,6 +416,10 @@ "description": "The current policy coordinator for the Elastic Agent", "type": "integer" }, + "policy_output_permissions_hash": { + "description": "The policy output permissions hash", + "type": "string" + }, "last_updated": { "description": "Date/time the Elastic Agent was last updated", "type": "string", @@ -353,7 +456,10 @@ }, "action_seq_no": { "description": "The last acknowledged action sequence number for the Elastic Agent", - "type": "integer" + "type": "array", + "items": { + "type": "integer" + } } }, "required": [ diff --git a/systemd/fleet.service b/systemd/fleet.service deleted file mode 100644 index 9c64765f7..000000000 --- 
a/systemd/fleet.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Elastic Fleet Daemon -After=network.target -Requires=network.target - -[Service] -ExecStart=/usr/bin/fleet-server -c /usr/share/fleet/fleet-server.yml -Type=simple -Restart=always -RestartSec=3 -StartLimitInterval=40 -StartLimitBurst=10 -LimitNOFILE=999999