From 622f27579de2e5482a395992e0d047744692cf75 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jan 2021 10:07:14 -0500 Subject: [PATCH 001/240] Add cherrypick_pr dev-tools helper. (#67) (cherry picked from commit 50affd54c1d2a056b78ee421bd8979dbc24497db) --- dev-tools/cherrypick_pr | 209 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 209 insertions(+) create mode 100755 dev-tools/cherrypick_pr diff --git a/dev-tools/cherrypick_pr b/dev-tools/cherrypick_pr new file mode 100755 index 000000000..cfa73a0e6 --- /dev/null +++ b/dev-tools/cherrypick_pr @@ -0,0 +1,209 @@ +#!/usr/bin/env python3 +"""Cherry pick and backport a PR""" +from __future__ import print_function + +from builtins import input +import sys +import os +import argparse +from os.path import expanduser +import re +from subprocess import check_call, call, check_output +import requests + +usage = """ +Example usage: + +./dev-tools/cherrypick_pr --create_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 + +In case of backporting errors, fix them, then run: + +git cherry-pick --continue +./dev-tools/cherrypick_pr --create_pr 5.0 2565 6490604aa0cf7fa61932a90700e6ca988fc8a527 --continue + +This script does the following: + +* cleanups both from_branch and to_branch (warning: drops local changes) +* creates a temporary branch named something like "branch_2565" +* calls the git cherry-pick command in this branch +* after fixing the merge errors (if needed), pushes the branch to your + remote +* if the --create_pr flag is used, it uses the GitHub API to create the PR + for you. Note that this requires you to have a Github token with the + public_repo scope in the `~/.elastic/github.token` file. This token + should be also authorized to Elastic organization so as to work with single-sign-on. + (see https://help.github.com/en/articles/authorizing-a-personal-access-token-for-use-with-saml-single-sign-on) + +Note that you need to take the commit hashes from `git log` on the +from_branch, copying the IDs from Github doesn't work in case we squashed the +PR. +""" + + +def main(): + """Main""" + parser = argparse.ArgumentParser( + description="Creates a PR for cherry-picking commits", + formatter_class=argparse.RawDescriptionHelpFormatter, + epilog=usage) + parser.add_argument("to_branch", + help="To branch (e.g 7.x)") + parser.add_argument("pr_number", + help="The PR number being merged (e.g. 2345)") + parser.add_argument("commit_hashes", metavar="hash", nargs="+", + help="The commit hashes to cherry pick." + + " You can specify multiple.") + parser.add_argument("--yes", action="store_true", + help="Assume yes. 
Warning: discards local changes.") + parser.add_argument("--continue", action="store_true", + help="Continue after fixing merging errors.") + parser.add_argument("--from_branch", default="master", + help="From branch") + parser.add_argument("--create_pr", action="store_true", + help="Create a PR using the Github API " + + "(requires token in ~/.elastic/github.token)") + parser.add_argument("--diff", action="store_true", + help="Display the diff before pushing the PR") + parser.add_argument("--remote", default="", + help="Which remote to push the backport branch to") + parser.add_argument("--zube-team", default="", + help="Team the PR belongs to") + parser.add_argument("--keep-backport-label", action="store_true", + help="Preserve label needs_backport in original PR") + args = parser.parse_args() + + print(args) + + tmp_branch = "backport_{}_{}".format(args.pr_number, args.to_branch) + + if not vars(args)["continue"]: + if not args.yes and input("This will destroy all local changes. " + + "Continue? [y/n]: ") != "y": + return 1 + check_call("git reset --hard", shell=True) + check_call("git clean -df", shell=True) + check_call("git fetch", shell=True) + + check_call("git checkout {}".format(args.from_branch), shell=True) + check_call("git pull", shell=True) + + check_call("git checkout {}".format(args.to_branch), shell=True) + check_call("git pull", shell=True) + + call("git branch -D {} > /dev/null".format(tmp_branch), shell=True) + check_call("git checkout -b {}".format(tmp_branch), shell=True) + if call("git cherry-pick -x {}".format(" ".join(args.commit_hashes)), + shell=True) != 0: + print("Looks like you have cherry-pick errors.") + print("Fix them, then run: ") + print(" git cherry-pick --continue") + print(" {} --continue".format(" ".join(sys.argv))) + return 1 + + if len(check_output("git status -s", shell=True).strip()) > 0: + print("Looks like you have uncommitted changes." + + " Please execute first: git cherry-pick --continue") + return 1 + + if len(check_output("git log HEAD...{}".format(args.to_branch), + shell=True).strip()) == 0: + print("No commit to push") + return 1 + + if args.diff: + call("git diff {}".format(args.to_branch), shell=True) + if input("Continue? [y/n]: ") != "y": + print("Aborting cherry-pick.") + return 1 + + print("Ready to push branch.") + + remote = args.remote + if not remote: + remote = input("To which remote should I push? (your fork): ") + + call("git push {} :{} > /dev/null".format(remote, tmp_branch), + shell=True) + check_call("git push --set-upstream {} {}" + .format(remote, tmp_branch), shell=True) + if not args.create_pr: + print("Done. 
Open PR by following this URL: \n\t" + + "https://github.com/elastic/fleet-server/compare/{}...{}:{}?expand=1" + .format(args.to_branch, remote, tmp_branch)) + else: + token = open(expanduser("~/.elastic/github.token"), "r").read().strip() + base = "https://api.github.com/repos/elastic/fleet-server" + session = requests.Session() + session.headers.update({"Authorization": "token " + token}) + + original_pr = session.get(base + "/pulls/" + args.pr_number).json() + + # get the github username from the remote where we pushed + remote_url = check_output("git remote get-url {}".format(remote), + shell=True) + remote_user = re.search("github.com[:/](.+)/fleet-server", str(remote_url)).group(1) + + # create PR + request = session.post(base + "/pulls", json=dict( + title="Cherry-pick #{} to {}: {}".format(args.pr_number, args.to_branch, original_pr["title"]), + head=remote_user + ":" + tmp_branch, + base=args.to_branch, + body="Cherry-pick of PR #{} to {} branch. Original message: \n\n{}" + .format(args.pr_number, args.to_branch, original_pr["body"]) + )) + if request.status_code > 299: + print("Creating PR failed: {}".format(request.json())) + sys.exit(1) + new_pr = request.json() + + # add labels + labels = ["backport"] + + zube_teams = zube_team_labels(original_pr) + if args.zube_team: + resp = session.get(base + "/labels/Team:"+args.zube_team) + if resp.status_code != 200: + print("Cannot find team label", resp.text) + sys.exit(1) + zube_teams = ["Team:" + args.zube_team] + + if len(zube_teams) > 0: + labels += zube_teams + labels.append("[zube]: In Review") + else: + labels.append("review") + + session.post( + base + "/issues/{}/labels".format(new_pr["number"]), json=labels) + + if not args.keep_backport_label: + # remove needs backport label from the original PR + session.delete(base + "/issues/{}/labels/needs_backport".format(args.pr_number)) + + # get version and set a version label on the original PR + version = get_version(os.getcwd()) + if version: + session.post( + base + "/issues/{}/labels".format(args.pr_number), json=["v" + version]) + + print("\nDone. PR created: {}".format(new_pr["html_url"])) + print("Please go and check it and add the review tags") + +def get_version(repo_dir): + pattern = re.compile(r'(const\s|)\w*(v|V)ersion\s=\s"(?P.*)"') + with open(os.path.join(repo_dir, "main.go"), "r") as f: + for line in f: + match = pattern.match(line) + if match: + return match.group('version') + +def zube_team_labels(pr): + teams = [] + for label in pr.get('labels', []): + name = label.get('name', '') + if name.startswith('Team:'): + teams.append(name) + return teams + +if __name__ == "__main__": + sys.exit(main()) From bea660f135fb25283d123ef86cd373df227b81ca Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 20 Jan 2021 11:29:31 -0500 Subject: [PATCH 002/240] Set version to 7.12.0. (#69) --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index b1aec97ea..13bd1d067 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ import ( "github.com/elastic/fleet-server/v7/cmd/fleet" ) -const defaultVersion = "8.0.0" +const defaultVersion = "7.12.0" var ( Version string = defaultVersion From f6bebab816ddd54c9c1c9d33ae816ddcf49042df Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 28 Jan 2021 17:06:27 -0500 Subject: [PATCH 003/240] Add communication to Elastic Agent (#71) (#75) * Add communication to Elastic Agent. * Fix command-line args and config from Agent. * Fix tests. * Handle config errors on initial config from Agent. 
* Log to err on failure. * Add syncing of the log writter. * Add docstring. * Re-structure the log init. * Remove unused code in logger. * Fix logging of failed start. * Don't become a leader until fleet.agent.id is set. * Fixes from code review. (cherry picked from commit d40570d79808bc3857488b567333482f530d4935) --- .gitignore | 3 +- NOTICE.txt | 466 +++++++++--------- cmd/fleet/main.go | 324 +++++++++++- cmd/fleet/server.go | 2 +- cmd/fleet/server_integration_test.go | 3 +- .../integration/wait-for-elasticsearch.sh | 4 +- fleet-server.yml | 3 + go.mod | 1 + go.sum | 8 + internal/pkg/config/config.go | 97 +++- internal/pkg/config/config_test.go | 48 +- internal/pkg/config/fleet.go | 12 +- internal/pkg/config/input.go | 6 + internal/pkg/config/logging.go | 68 +++ internal/pkg/config/output.go | 3 - internal/pkg/coordinator/monitor.go | 14 +- internal/pkg/logger/logger.go | 147 ++++-- internal/pkg/policy/monitor.go | 2 +- internal/pkg/policy/self.go | 202 ++++++++ internal/pkg/policy/self_test.go | 426 ++++++++++++++++ internal/pkg/status/reporter.go | 53 ++ internal/pkg/testing/retry.go | 4 +- 22 files changed, 1543 insertions(+), 353 deletions(-) create mode 100644 internal/pkg/config/logging.go create mode 100644 internal/pkg/policy/self.go create mode 100644 internal/pkg/policy/self_test.go create mode 100644 internal/pkg/status/reporter.go diff --git a/.gitignore b/.gitignore index 5efb2dd97..30820b819 100644 --- a/.gitignore +++ b/.gitignore @@ -4,9 +4,10 @@ .vscode/ bin/ -*.rpm build/ fleet-server fleet_server fleet-server.dev.yml +*.log +*.log.* diff --git a/NOTICE.txt b/NOTICE.txt index 334b5810d..af04cc594 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -249,6 +249,239 @@ binaries that contain `-oss` in the artifact name are licensed under the Apache License Version 2.0. +-------------------------------------------------------------------------------- +Dependency : github.com/elastic/elastic-agent-client/v7 +Version: v7.0.0-20200709172729-d43b7ad5833a +Licence type (autodetected): Elastic +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20200709172729-d43b7ad5833a/LICENSE.txt: + +ELASTIC LICENSE AGREEMENT + +PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH +CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF +THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") +THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, +CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. BY +INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU +ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE +WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE +GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON +BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL +AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF +SUCH ENTITY. + +Posted Date: April 20, 2018 + +This Agreement is entered into by and between Elasticsearch BV ("Elastic") and +You, or the legal entity on behalf of whom You are acting (as applicable, +"You"). + +1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE +SOFTWARE + + 1.1 Object Code End User License. 
Subject to the terms and conditions of + Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and + for so long as you are not in breach of any provision of this Agreement, a + License to the Basic Features and Functions of the Elastic Software. + + 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic + and its licensors own all right, title and interest in and to the Elastic + Software, and except as expressly set forth in Sections 1.1, and 2.1 of this + Agreement, no other license to the Elastic Software is granted to You under + this Agreement, by implication, estoppel or otherwise. You agree not to: (i) + reverse engineer or decompile, decrypt, disassemble or otherwise reduce any + Elastic Software provided to You in Object Code, or any portion thereof, to + Source Code, except and only to the extent any such restriction is prohibited + by applicable law, (ii) except as expressly permitted in this Agreement, + prepare derivative works from, modify, copy or use the Elastic Software Object + Code or the Commercial Software Source Code in any manner; (iii) except as + expressly permitted in Section 1.1 above, transfer, sell, rent, lease, + distribute, sublicense, loan or otherwise transfer, Elastic Software Object + Code, in whole or in part, to any third party; (iv) use Elastic Software + Object Code for providing time-sharing services, any software-as-a-service, + service bureau services or as part of an application services provider or + other service offering (collectively, "SaaS Offering") where obtaining access + to the Elastic Software or the features and functions of the Elastic Software + is a primary reason or substantial motivation for users of the SaaS Offering + to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) + circumvent the limitations on use of Elastic Software provided to You in + Object Code format that are imposed or preserved by any License Key, or (vi) + alter or remove any Marks and Notices in the Elastic Software. If You have any + question as to whether a specific SaaS Offering constitutes a Prohibited SaaS + Offering, or are interested in obtaining Elastic's permission to engage in + commercial or non-commercial distribution of the Elastic Software, please + contact elastic_license@elastic.co. + + 1.3 Third Party Open Source Software. The Commercial Software may contain or + be provided with third party open source libraries, components, utilities and + other open source software (collectively, "Open Source Software"), which Open + Source Software may have applicable license terms as identified on a website + designated by Elastic. Notwithstanding anything to the contrary herein, use of + the Open Source Software shall be subject to the license terms and conditions + applicable to such Open Source Software, to the extent required by the + applicable licensor (which terms shall not restrict the license rights granted + to You hereunder, but may contain additional rights). To the extent any + condition of this Agreement conflicts with any license to the Open Source + Software, the Open Source Software license will govern with respect to such + Open Source Software only. Elastic may also separately provide you with + certain open source software that is licensed by Elastic. Your use of such + Elastic open source software will not be governed by this Agreement, but by + the applicable open source license terms. + +2. COMMERCIAL SOFTWARE SOURCE CODE + + 2.1 Limited License. 
Subject to the terms and conditions of Section 2.2 of + this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as + you are not in breach of any provision of this Agreement, a limited, + non-exclusive, non-transferable, fully paid up royalty free right and license + to the Commercial Software in Source Code format, without the right to grant + or authorize sublicenses, to prepare Derivative Works of the Commercial + Software, provided You (i) do not hack the licensing mechanism, or otherwise + circumvent the intended limitations on the use of Elastic Software to enable + features other than Basic Features and Functions or those features You are + entitled to as part of a Subscription, and (ii) use the resulting object code + only for reasonable testing purposes. + + 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the + Commercial Software Source Code other than in accordance with Section 2.1 + above, (ii) use a Derivative Work of the Commercial Software outside of a + Non-production Environment, in any production capacity, on a temporary or + permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, + loan or otherwise make available the Commercial Software Source Code, in whole + or in part, to any third party. Notwithstanding the foregoing, You may + maintain a copy of the repository in which the Source Code of the Commercial + Software resides and that copy may be publicly accessible, provided that you + include this Agreement with Your copy of the repository. + +3. TERMINATION + + 3.1 Termination. This Agreement will automatically terminate, whether or not + You receive notice of such Termination from Elastic, if You breach any of its + provisions. + + 3.2 Post Termination. Upon any termination of this Agreement, for any reason, + You shall promptly cease the use of the Elastic Software in Object Code format + and cease use of the Commercial Software in Source Code format. For the + avoidance of doubt, termination of this Agreement will not affect Your right + to use Elastic Software, in either Object Code or Source Code formats, made + available under the Apache License Version 2.0. + + 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or + expiration of this Agreement. + +4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY + + 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE + LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, + AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR + STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. TO THE MAXIMUM EXTENT + PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY + DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR + PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH + RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS + OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE + ELASTIC SOFTWARE WILL BE UNINTERRUPTED. + + 4.2 Limitation of Liability. 
IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE + LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, + INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS + INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY + SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH + OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE + PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A + BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC + HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +5. MISCELLANEOUS + + This Agreement completely and exclusively states the entire agreement of the + parties regarding the subject matter herein, and it supersedes, and its terms + govern, all prior proposals, agreements, or other communications between the + parties, oral or written, regarding such subject matter. This Agreement may be + modified by Elastic from time to time, and any such modifications will be + effective upon the "Posted Date" set forth at the top of the modified + Agreement. If any provision hereof is held unenforceable, this Agreement will + continue without said provision and be interpreted to reflect the original + intent of the parties. This Agreement and any non-contractual obligation + arising out of or in connection with it, is governed exclusively by Dutch law. + This Agreement shall not be governed by the 1980 UN Convention on Contracts + for the International Sale of Goods. All disputes arising out of or in + connection with this Agreement, including its existence and validity, shall be + resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except + where mandatory law provides for the courts at another location in The + Netherlands to have jurisdiction. The parties hereby irrevocably waive any and + all claims and defenses either might otherwise have in any such action or + proceeding in any of such courts based upon any alleged lack of personal + jurisdiction, improper venue, forum non conveniens or any similar claim or + defense. A breach or threatened breach, by You of Section 2 may cause + irreparable harm for which damages at law may not provide adequate relief, and + therefore Elastic shall be entitled to seek injunctive relief without being + required to post a bond. You may not assign this Agreement (including by + operation of law in connection with a merger or acquisition), in whole or in + part to any third party without the prior written consent of Elastic, which + may be withheld or granted by Elastic in its sole and absolute discretion. + Any assignment in violation of the preceding sentence is void. Notices to + Elastic may also be sent to legal@elastic.co. + +6. DEFINITIONS + + The following terms have the meanings ascribed: + + 6.1 "Affiliate" means, with respect to a party, any entity that controls, is + controlled by, or which is under common control with, such party, where + "control" means ownership of at least fifty percent (50%) of the outstanding + voting shares of the entity, or the contractual right to establish policy for, + and manage the operations of, the entity. + + 6.2 "Basic Features and Functions" means those features and functions of the + Elastic Software that are eligible for use under a Basic license, as set forth + at https://www.elastic.co/subscriptions, as may be modified by Elastic from + time to time. 
+ + 6.3 "Commercial Software" means the Elastic Software Source Code in any file + containing a header stating the contents are subject to the Elastic License or + which is contained in the repository folder labeled "x-pack", unless a LICENSE + file present in the directory subtree declares a different license. + + 6.4 "Derivative Work of the Commercial Software" means, for purposes of this + Agreement, any modification(s) or enhancement(s) to the Commercial Software, + which represent, as a whole, an original work of authorship. + + 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, + royalty free, right and license, without the right to grant or authorize + sublicenses, solely for Your internal business operations to (i) install and + use the applicable Features and Functions of the Elastic Software in Object + Code, and (ii) permit Contractors and Your Affiliates to use the Elastic + software as set forth in (i) above, provided that such use by Contractors must + be solely for Your benefit and/or the benefit of Your Affiliates, and You + shall be responsible for all acts and omissions of such Contractors and + Affiliates in connection with their use of the Elastic software that are + contrary to the terms and conditions of this Agreement. + + 6.6 "License Key" means a sequence of bytes, including but not limited to a + JSON blob, that is used to enable certain features and functions of the + Elastic Software. + + 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and + notices present on the Documentation as originally provided by Elastic. + + 6.8 "Non-production Environment" means an environment for development, testing + or quality assurance, where software is not used for production purposes. + + 6.9 "Object Code" means any form resulting from mechanical transformation or + translation of Source Code form, including but not limited to compiled object + code, generated documentation, and conversions to other media types. + + 6.10 "Source Code" means the preferred form of computer software for making + modifications, including but not limited to software source code, + documentation source, and configuration files. + + 6.11 "Subscription" means the right to receive Support Services and a License + to the Commercial Software. + + -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 Version: v8.0.0-20200728144331-527225d8e836 @@ -13938,239 +14171,6 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/ecs@v1.6.0/LICE limitations under the License. --------------------------------------------------------------------------------- -Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20200709172729-d43b7ad5833a -Licence type (autodetected): Elastic --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20200709172729-d43b7ad5833a/LICENSE.txt: - -ELASTIC LICENSE AGREEMENT - -PLEASE READ CAREFULLY THIS ELASTIC LICENSE AGREEMENT (THIS "AGREEMENT"), WHICH -CONSTITUTES A LEGALLY BINDING AGREEMENT AND GOVERNS ALL OF YOUR USE OF ALL OF -THE ELASTIC SOFTWARE WITH WHICH THIS AGREEMENT IS INCLUDED ("ELASTIC SOFTWARE") -THAT IS PROVIDED IN OBJECT CODE FORMAT, AND, IN ACCORDANCE WITH SECTION 2 BELOW, -CERTAIN OF THE ELASTIC SOFTWARE THAT IS PROVIDED IN SOURCE CODE FORMAT. 
BY -INSTALLING OR USING ANY OF THE ELASTIC SOFTWARE GOVERNED BY THIS AGREEMENT, YOU -ARE ASSENTING TO THE TERMS AND CONDITIONS OF THIS AGREEMENT. IF YOU DO NOT AGREE -WITH SUCH TERMS AND CONDITIONS, YOU MAY NOT INSTALL OR USE THE ELASTIC SOFTWARE -GOVERNED BY THIS AGREEMENT. IF YOU ARE INSTALLING OR USING THE SOFTWARE ON -BEHALF OF A LEGAL ENTITY, YOU REPRESENT AND WARRANT THAT YOU HAVE THE ACTUAL -AUTHORITY TO AGREE TO THE TERMS AND CONDITIONS OF THIS AGREEMENT ON BEHALF OF -SUCH ENTITY. - -Posted Date: April 20, 2018 - -This Agreement is entered into by and between Elasticsearch BV ("Elastic") and -You, or the legal entity on behalf of whom You are acting (as applicable, -"You"). - -1. OBJECT CODE END USER LICENSES, RESTRICTIONS AND THIRD PARTY OPEN SOURCE -SOFTWARE - - 1.1 Object Code End User License. Subject to the terms and conditions of - Section 1.2 of this Agreement, Elastic hereby grants to You, AT NO CHARGE and - for so long as you are not in breach of any provision of this Agreement, a - License to the Basic Features and Functions of the Elastic Software. - - 1.2 Reservation of Rights; Restrictions. As between Elastic and You, Elastic - and its licensors own all right, title and interest in and to the Elastic - Software, and except as expressly set forth in Sections 1.1, and 2.1 of this - Agreement, no other license to the Elastic Software is granted to You under - this Agreement, by implication, estoppel or otherwise. You agree not to: (i) - reverse engineer or decompile, decrypt, disassemble or otherwise reduce any - Elastic Software provided to You in Object Code, or any portion thereof, to - Source Code, except and only to the extent any such restriction is prohibited - by applicable law, (ii) except as expressly permitted in this Agreement, - prepare derivative works from, modify, copy or use the Elastic Software Object - Code or the Commercial Software Source Code in any manner; (iii) except as - expressly permitted in Section 1.1 above, transfer, sell, rent, lease, - distribute, sublicense, loan or otherwise transfer, Elastic Software Object - Code, in whole or in part, to any third party; (iv) use Elastic Software - Object Code for providing time-sharing services, any software-as-a-service, - service bureau services or as part of an application services provider or - other service offering (collectively, "SaaS Offering") where obtaining access - to the Elastic Software or the features and functions of the Elastic Software - is a primary reason or substantial motivation for users of the SaaS Offering - to access and/or use the SaaS Offering ("Prohibited SaaS Offering"); (v) - circumvent the limitations on use of Elastic Software provided to You in - Object Code format that are imposed or preserved by any License Key, or (vi) - alter or remove any Marks and Notices in the Elastic Software. If You have any - question as to whether a specific SaaS Offering constitutes a Prohibited SaaS - Offering, or are interested in obtaining Elastic's permission to engage in - commercial or non-commercial distribution of the Elastic Software, please - contact elastic_license@elastic.co. - - 1.3 Third Party Open Source Software. The Commercial Software may contain or - be provided with third party open source libraries, components, utilities and - other open source software (collectively, "Open Source Software"), which Open - Source Software may have applicable license terms as identified on a website - designated by Elastic. 
Notwithstanding anything to the contrary herein, use of - the Open Source Software shall be subject to the license terms and conditions - applicable to such Open Source Software, to the extent required by the - applicable licensor (which terms shall not restrict the license rights granted - to You hereunder, but may contain additional rights). To the extent any - condition of this Agreement conflicts with any license to the Open Source - Software, the Open Source Software license will govern with respect to such - Open Source Software only. Elastic may also separately provide you with - certain open source software that is licensed by Elastic. Your use of such - Elastic open source software will not be governed by this Agreement, but by - the applicable open source license terms. - -2. COMMERCIAL SOFTWARE SOURCE CODE - - 2.1 Limited License. Subject to the terms and conditions of Section 2.2 of - this Agreement, Elastic hereby grants to You, AT NO CHARGE and for so long as - you are not in breach of any provision of this Agreement, a limited, - non-exclusive, non-transferable, fully paid up royalty free right and license - to the Commercial Software in Source Code format, without the right to grant - or authorize sublicenses, to prepare Derivative Works of the Commercial - Software, provided You (i) do not hack the licensing mechanism, or otherwise - circumvent the intended limitations on the use of Elastic Software to enable - features other than Basic Features and Functions or those features You are - entitled to as part of a Subscription, and (ii) use the resulting object code - only for reasonable testing purposes. - - 2.2 Restrictions. Nothing in Section 2.1 grants You the right to (i) use the - Commercial Software Source Code other than in accordance with Section 2.1 - above, (ii) use a Derivative Work of the Commercial Software outside of a - Non-production Environment, in any production capacity, on a temporary or - permanent basis, or (iii) transfer, sell, rent, lease, distribute, sublicense, - loan or otherwise make available the Commercial Software Source Code, in whole - or in part, to any third party. Notwithstanding the foregoing, You may - maintain a copy of the repository in which the Source Code of the Commercial - Software resides and that copy may be publicly accessible, provided that you - include this Agreement with Your copy of the repository. - -3. TERMINATION - - 3.1 Termination. This Agreement will automatically terminate, whether or not - You receive notice of such Termination from Elastic, if You breach any of its - provisions. - - 3.2 Post Termination. Upon any termination of this Agreement, for any reason, - You shall promptly cease the use of the Elastic Software in Object Code format - and cease use of the Commercial Software in Source Code format. For the - avoidance of doubt, termination of this Agreement will not affect Your right - to use Elastic Software, in either Object Code or Source Code formats, made - available under the Apache License Version 2.0. - - 3.3 Survival. Sections 1.2, 2.2. 3.3, 4 and 5 shall survive any termination or - expiration of this Agreement. - -4. DISCLAIMER OF WARRANTIES AND LIMITATION OF LIABILITY - - 4.1 Disclaimer of Warranties. TO THE MAXIMUM EXTENT PERMITTED UNDER APPLICABLE - LAW, THE ELASTIC SOFTWARE IS PROVIDED "AS IS" WITHOUT WARRANTY OF ANY KIND, - AND ELASTIC AND ITS LICENSORS MAKE NO WARRANTIES WHETHER EXPRESSED, IMPLIED OR - STATUTORY REGARDING OR RELATING TO THE ELASTIC SOFTWARE. 
TO THE MAXIMUM EXTENT - PERMITTED UNDER APPLICABLE LAW, ELASTIC AND ITS LICENSORS SPECIFICALLY - DISCLAIM ALL IMPLIED WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR - PURPOSE AND NON-INFRINGEMENT WITH RESPECT TO THE ELASTIC SOFTWARE, AND WITH - RESPECT TO THE USE OF THE FOREGOING. FURTHER, ELASTIC DOES NOT WARRANT RESULTS - OF USE OR THAT THE ELASTIC SOFTWARE WILL BE ERROR FREE OR THAT THE USE OF THE - ELASTIC SOFTWARE WILL BE UNINTERRUPTED. - - 4.2 Limitation of Liability. IN NO EVENT SHALL ELASTIC OR ITS LICENSORS BE - LIABLE TO YOU OR ANY THIRD PARTY FOR ANY DIRECT OR INDIRECT DAMAGES, - INCLUDING, WITHOUT LIMITATION, FOR ANY LOSS OF PROFITS, LOSS OF USE, BUSINESS - INTERRUPTION, LOSS OF DATA, COST OF SUBSTITUTE GOODS OR SERVICES, OR FOR ANY - SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES OF ANY KIND, IN CONNECTION WITH - OR ARISING OUT OF THE USE OR INABILITY TO USE THE ELASTIC SOFTWARE, OR THE - PERFORMANCE OF OR FAILURE TO PERFORM THIS AGREEMENT, WHETHER ALLEGED AS A - BREACH OF CONTRACT OR TORTIOUS CONDUCT, INCLUDING NEGLIGENCE, EVEN IF ELASTIC - HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. - -5. MISCELLANEOUS - - This Agreement completely and exclusively states the entire agreement of the - parties regarding the subject matter herein, and it supersedes, and its terms - govern, all prior proposals, agreements, or other communications between the - parties, oral or written, regarding such subject matter. This Agreement may be - modified by Elastic from time to time, and any such modifications will be - effective upon the "Posted Date" set forth at the top of the modified - Agreement. If any provision hereof is held unenforceable, this Agreement will - continue without said provision and be interpreted to reflect the original - intent of the parties. This Agreement and any non-contractual obligation - arising out of or in connection with it, is governed exclusively by Dutch law. - This Agreement shall not be governed by the 1980 UN Convention on Contracts - for the International Sale of Goods. All disputes arising out of or in - connection with this Agreement, including its existence and validity, shall be - resolved by the courts with jurisdiction in Amsterdam, The Netherlands, except - where mandatory law provides for the courts at another location in The - Netherlands to have jurisdiction. The parties hereby irrevocably waive any and - all claims and defenses either might otherwise have in any such action or - proceeding in any of such courts based upon any alleged lack of personal - jurisdiction, improper venue, forum non conveniens or any similar claim or - defense. A breach or threatened breach, by You of Section 2 may cause - irreparable harm for which damages at law may not provide adequate relief, and - therefore Elastic shall be entitled to seek injunctive relief without being - required to post a bond. You may not assign this Agreement (including by - operation of law in connection with a merger or acquisition), in whole or in - part to any third party without the prior written consent of Elastic, which - may be withheld or granted by Elastic in its sole and absolute discretion. - Any assignment in violation of the preceding sentence is void. Notices to - Elastic may also be sent to legal@elastic.co. - -6. 
DEFINITIONS - - The following terms have the meanings ascribed: - - 6.1 "Affiliate" means, with respect to a party, any entity that controls, is - controlled by, or which is under common control with, such party, where - "control" means ownership of at least fifty percent (50%) of the outstanding - voting shares of the entity, or the contractual right to establish policy for, - and manage the operations of, the entity. - - 6.2 "Basic Features and Functions" means those features and functions of the - Elastic Software that are eligible for use under a Basic license, as set forth - at https://www.elastic.co/subscriptions, as may be modified by Elastic from - time to time. - - 6.3 "Commercial Software" means the Elastic Software Source Code in any file - containing a header stating the contents are subject to the Elastic License or - which is contained in the repository folder labeled "x-pack", unless a LICENSE - file present in the directory subtree declares a different license. - - 6.4 "Derivative Work of the Commercial Software" means, for purposes of this - Agreement, any modification(s) or enhancement(s) to the Commercial Software, - which represent, as a whole, an original work of authorship. - - 6.5 "License" means a limited, non-exclusive, non-transferable, fully paid up, - royalty free, right and license, without the right to grant or authorize - sublicenses, solely for Your internal business operations to (i) install and - use the applicable Features and Functions of the Elastic Software in Object - Code, and (ii) permit Contractors and Your Affiliates to use the Elastic - software as set forth in (i) above, provided that such use by Contractors must - be solely for Your benefit and/or the benefit of Your Affiliates, and You - shall be responsible for all acts and omissions of such Contractors and - Affiliates in connection with their use of the Elastic software that are - contrary to the terms and conditions of this Agreement. - - 6.6 "License Key" means a sequence of bytes, including but not limited to a - JSON blob, that is used to enable certain features and functions of the - Elastic Software. - - 6.7 "Marks and Notices" means all Elastic trademarks, trade names, logos and - notices present on the Documentation as originally provided by Elastic. - - 6.8 "Non-production Environment" means an environment for development, testing - or quality assurance, where software is not used for production purposes. - - 6.9 "Object Code" means any form resulting from mechanical transformation or - translation of Source Code form, including but not limited to compiled object - code, generated documentation, and conversions to other media types. - - 6.10 "Source Code" means the preferred form of computer software for making - modifications, including but not limited to software source code, - documentation source, and configuration files. - - 6.11 "Subscription" means the right to receive Support Services and a License - to the Commercial Software. 
- - -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-concert Version: v0.0.4 diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 2c0e1abd2..a197ec178 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -6,6 +6,12 @@ package fleet import ( "context" + "fmt" + "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/yaml" + "io" + "os" + "sync" "time" "github.com/elastic/fleet-server/v7/internal/pkg/action" @@ -21,22 +27,23 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/profile" + "github.com/elastic/fleet-server/v7/internal/pkg/reload" "github.com/elastic/fleet-server/v7/internal/pkg/saved" "github.com/elastic/fleet-server/v7/internal/pkg/signal" + "github.com/elastic/fleet-server/v7/internal/pkg/status" + "github.com/elastic/elastic-agent-client/v7/pkg/client" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" "golang.org/x/sync/errgroup" ) -const kPolicyThrottle = time.Millisecond * 5 - -func checkErr(err error) { - if err != nil && err != context.Canceled { - panic(err) - } -} +const ( + kPolicyThrottle = time.Millisecond * 5 + kAgentMode = "agent-mode" +) func savedObjectKey() string { key := env.GetStr( @@ -54,26 +61,75 @@ func installSignalHandler() context.Context { func getRunCommand(version string) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { - - cfgPath, err := cmd.Flags().GetString("config") + c, err := cache.New() if err != nil { return err } - cfg, err := config.LoadFile(cfgPath) + + cfgObject := cmd.Flags().Lookup("E").Value.(*config.Flag) + cliCfg := cfgObject.Config() + + agentMode, err := cmd.Flags().GetBool(kAgentMode) if err != nil { return err } - logger.Init(cfg) + var l *logger.Logger + var runErr error + if agentMode { + cfg, err := config.FromConfig(cliCfg) + if err != nil { + return err + } + l, err = logger.Init(cfg) + if err != nil { + return err + } - ctx := installSignalHandler() - c, err := cache.New() - checkErr(err) + agent, err := NewAgentMode(cliCfg, os.Stdin, c, version, l) + if err != nil { + return err + } - srv, err := NewFleetServer(cfg, c, version) - checkErr(err) + runErr = agent.Run(installSignalHandler()) + } else { + cfgPath, err := cmd.Flags().GetString("config") + if err != nil { + return err + } + cfgData, err := yaml.NewConfigWithFile(cfgPath, config.DefaultOptions...) + if err != nil { + return err + } + err = cfgData.Merge(cliCfg, config.DefaultOptions...) 
+ if err != nil { + return err + } + cfg, err := config.FromConfig(cfgData) + if err != nil { + return err + } + + l, err = logger.Init(cfg) + if err != nil { + return err + } - return srv.Run(ctx) + srv, err := NewFleetServer(cfg, c, version, status.NewLog()) + if err != nil { + return err + } + + runErr = srv.Run(installSignalHandler()) + } + + if runErr != nil && runErr != context.Canceled { + log.Error().Err(runErr).Msg("Exiting") + l.Sync() + return runErr + } + l.Sync() + return nil } } @@ -84,24 +140,224 @@ func NewCommand(version string) *cobra.Command { RunE: getRunCommand(version), } cmd.Flags().StringP("config", "c", "fleet-server.yml", "Configuration for Fleet Server") + cmd.Flags().Bool(kAgentMode, false, "Running under execution of the Elastic Agent") + cmd.Flags().VarP(config.NewFlag(), "E", "E", "Overwrite configuration value") return cmd } -type FleetServer struct { +type firstCfg struct { + cfg *config.Config + err error +} + +type AgentMode struct { + cliCfg *ucfg.Config + cache cache.Cache version string - cfg *config.Config - cfgCh chan *config.Config - cache cache.Cache + reloadables []reload.Reloadable + + agent client.Client + + mux sync.Mutex + firstCfg chan firstCfg + srv *FleetServer + srvCtx context.Context + srvCanceller context.CancelFunc + startChan chan struct{} +} + +func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, c cache.Cache, version string, reloadables ...reload.Reloadable) (*AgentMode, error) { + var err error + + a := &AgentMode{ + cliCfg: cliCfg, + cache: c, + version: version, + reloadables: reloadables, + } + a.agent, err = client.NewFromReader(reader, a) + if err != nil { + return nil, err + } + return a, nil +} + +func (a *AgentMode) Run(ctx context.Context) error { + ctx, canceller := context.WithCancel(ctx) + defer canceller() + + a.firstCfg = make(chan firstCfg) + a.startChan = make(chan struct{}, 1) + log.Info().Msg("starting communication connection back to Elastic Agent") + err := a.agent.Start(ctx) + if err != nil { + return err + } + + // wait for the initial configuration to be sent from the + // Elastic Agent before starting the actual Fleet Server. + log.Info().Msg("waiting for Elastic Agent to send initial configuration") + var cfg firstCfg + select { + case <-ctx.Done(): + return fmt.Errorf("never received initial configuration") + case cfg = <-a.firstCfg: + } + + // possible that first configuration resulted in an error + if cfg.err != nil { + // unblock startChan even though there was an error + a.startChan <- struct{}{} + return cfg.err + } + + // start fleet server with the initial configuration and its + // own context (needed so when OnStop occurs the fleet server + // is stopped and not the elastic-agent-client as well) + srvCtx, srvCancel := context.WithCancel(ctx) + defer srvCancel() + log.Info().Msg("received initial configuration starting Fleet Server") + srv, err := NewFleetServer(cfg.cfg, a.cache, a.version, status.NewChained(status.NewLog(), a.agent)) + if err != nil { + // unblock startChan even though there was an error + a.startChan <- struct{}{} + return err + } + a.mux.Lock() + close(a.firstCfg) + a.firstCfg = nil + a.srv = srv + a.srvCtx = srvCtx + a.srvCanceller = srvCancel + a.mux.Unlock() + + // trigger startChan so OnConfig can continue + a.startChan <- struct{}{} + + return a.srv.Run(srvCtx) +} + +func (a *AgentMode) OnConfig(s string) { + a.mux.Lock() + cliCfg := ucfg.MustNewFrom(a.cliCfg, config.DefaultOptions...) 
+ srv := a.srv + ctx := a.srvCtx + canceller := a.srvCanceller + cfgChan := a.firstCfg + startChan := a.startChan + a.mux.Unlock() + + var cfg *config.Config + var err error + defer func() { + if err != nil { + if cfgChan != nil { + // failure on first config + cfgChan <- firstCfg{ + cfg: nil, + err: err, + } + // block until startChan signalled + <-startChan + return + } + + log.Err(err).Msg("failed to reload configuration") + if canceller != nil { + canceller() + } + } + }() + + // load configuration and then merge it on top of the CLI configuration + var cfgData *ucfg.Config + cfgData, err = yaml.NewConfig([]byte(s), config.DefaultOptions...) + if err != nil { + return + } + err = cliCfg.Merge(cfgData, config.DefaultOptions...) + if err != nil { + return + } + cfg, err = config.FromConfig(cliCfg) + if err != nil { + return + } + + if cfgChan != nil { + // reload the generic reloadables + for _, r := range a.reloadables { + err = r.Reload(ctx, cfg) + if err != nil { + return + } + } + + // send starting configuration so Fleet Server can start + cfgChan <- firstCfg{ + cfg: cfg, + err: nil, + } + + // block handling more OnConfig calls until the Fleet Server + // has been fully started + <-startChan + } else if srv != nil { + // reload the generic reloadables + for _, r := range a.reloadables { + err = r.Reload(ctx, cfg) + if err != nil { + return + } + } + + // reload the server + err = srv.Reload(ctx, cfg) + if err != nil { + return + } + } else { + err = fmt.Errorf("internal service should have been started") + return + } +} + +func (a *AgentMode) OnStop() { + a.mux.Lock() + canceller := a.srvCanceller + a.mux.Unlock() + + if canceller != nil { + canceller() + } +} + +func (a *AgentMode) OnError(err error) { + // Log communication error through the logger. These errors are only + // provided for logging purposes. The elastic-agent-client handles + // retries and reconnects internally automatically. + log.Err(err) +} + +type FleetServer struct { + version string + policyId string + + cfg *config.Config + cfgCh chan *config.Config + cache cache.Cache + reporter status.Reporter } // NewFleetServer creates the actual fleet server service. 
-func NewFleetServer(cfg *config.Config, c cache.Cache, version string) (*FleetServer, error) { +func NewFleetServer(cfg *config.Config, c cache.Cache, version string, reporter status.Reporter) (*FleetServer, error) { return &FleetServer{ - version: version, - cfg: cfg, - cfgCh: make(chan *config.Config, 1), - cache: c, + version: version, + cfg: cfg, + cfgCh: make(chan *config.Config, 1), + cache: c, + reporter: reporter, }, nil } @@ -146,9 +402,17 @@ func (f *FleetServer) Run(ctx context.Context) error { proEg, srvEg *errgroup.Group ) + started := false for { ech := make(chan error, 2) + if started { + f.reporter.Status(proto.StateObserved_CONFIGURING, "Re-configuring", nil) + } else { + started = true + f.reporter.Status(proto.StateObserved_STARTING, "Starting", nil) + } + // Restart profiler if curCfg == nil || curCfg.Inputs[0].Server.Profile.Bind != newCfg.Inputs[0].Server.Profile.Bind { stop(proCancel, proEg) @@ -171,9 +435,11 @@ func (f *FleetServer) Run(ctx context.Context) error { case newCfg = <-f.cfgCh: log.Debug().Msg("Server configuration update") case err := <-ech: + f.reporter.Status(proto.StateObserved_FAILED, err.Error(), nil) log.Error().Err(err).Msg("Fleet Server failed") - return nil + return err case <-ctx.Done(): + f.reporter.Status(proto.StateObserved_STOPPING, "Stopping", nil) log.Info().Msg("Fleet Server exited") return nil } @@ -234,6 +500,10 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er pm := policy.NewMonitor(bulker, pim, kPolicyThrottle) g.Go(loggedRunFunc(ctx, "Policy monitor", pm.Run)) + // Policy self monitor + sm := policy.NewSelfMonitor(cfg.Fleet, bulker, pim, cfg.Inputs[0].Policy.ID, f.reporter) + g.Go(loggedRunFunc(ctx, "Policy self monitor", sm.Run)) + // Actions monitoring var am monitor.SimpleMonitor var ad *action.Dispatcher diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index ce32fad2f..0e418adee 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -71,7 +71,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve defer ln.Close() - // TODO: Use tls.Config to properly lock down tls connection + // TODO: Use tls.Config to properly mux down tls connection keyFile := cfg.TLS.Key certFile := cfg.TLS.Cert diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 8dc86fbb3..61cbd3bf4 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -10,6 +10,7 @@ import ( "bytes" "context" "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/status" "io/ioutil" "net/http" "path" @@ -81,7 +82,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { cfg.Inputs[0].Server = *srvcfg log.Info().Uint16("port", port).Msg("Test fleet server") - srv, err := NewFleetServer(cfg, c, serverVersion) + srv, err := NewFleetServer(cfg, c, serverVersion, status.NewLog()) if err != nil { return nil, err } diff --git a/dev-tools/integration/wait-for-elasticsearch.sh b/dev-tools/integration/wait-for-elasticsearch.sh index ec9d251f7..509f1cad7 100755 --- a/dev-tools/integration/wait-for-elasticsearch.sh +++ b/dev-tools/integration/wait-for-elasticsearch.sh @@ -30,9 +30,9 @@ until [ "$health" = 'green' ]; do health="$(curl -fsSL "$host/_cat/health?h=status")" echo $health health="$(echo "$health" | tr -d '[:space:]')" - >&2 echo "Elastic Search is unavailable - sleeping" + >&2 echo "Elasticsearch is unavailable - sleeping" sleep 1 done ->&2 echo "Elastic Search is up" +>&2 echo "Elasticsearch is up" exec $cmd \ No 
newline at end of file diff --git a/fleet-server.yml b/fleet-server.yml index bd1d94944..c0fa2c593 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -9,3 +9,6 @@ fleet: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 logging: level: '${LOG_LEVEL:INFO}' + +logging: + to_stderr: true diff --git a/go.mod b/go.mod index d570bf3d6..dcf5ad298 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ require ( github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f github.com/dgraph-io/ristretto v0.0.3 github.com/elastic/beats/v7 v7.10.0 + github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible diff --git a/go.sum b/go.sum index 2275a17e8..15f04d599 100644 --- a/go.sum +++ b/go.sum @@ -143,6 +143,7 @@ github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2/go.mod github.com/elastic/beats/v7 v7.10.0 h1:MpXREz0PzwuHpJnNAHcjmRoQRfVUnJFJvYQdzRjBZKg= github.com/elastic/beats/v7 v7.10.0/go.mod h1:GV6Gy80eRYpJ4Dk4MZcQFMxXbmOnWrj9ZPK5UhwCkhU= github.com/elastic/ecs v1.6.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a h1:2NHgf1RUw+f240lpTnLrCp1aBNvq2wDi0E1A423/S1k= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= @@ -228,6 +229,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= @@ -427,6 +429,7 @@ github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJ github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I= github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= +github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA= github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY= github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= @@ -579,6 +582,7 @@ golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLL 
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -634,6 +638,7 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -696,6 +701,7 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= @@ -704,12 +710,14 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= 
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index a868a1da6..956457014 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -5,9 +5,10 @@ package config import ( - "fmt" + "errors" "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/flag" "github.com/elastic/go-ucfg/yaml" ) @@ -21,9 +22,10 @@ var DefaultOptions = []ucfg.Option{ // Config is the global configuration. type Config struct { - Fleet Fleet `config:"fleet"` - Output Output `config:"output"` - Inputs []Input `config:"inputs"` + Fleet Fleet `config:"fleet"` + Output Output `config:"output"` + Inputs []Input `config:"inputs"` + Logging Logging `config:"logging"` } // InitDefaults initializes the defaults for the configuration. @@ -34,25 +36,98 @@ func (c *Config) InitDefaults() { // Validate ensures that the configuration is valid. func (c *Config) Validate() error { - if c.Inputs == nil || len(c.Inputs) == 0 { - return fmt.Errorf("a fleet-server input can be defined") + if len(c.Inputs) == 0 { + return errors.New("a fleet-server input must be defined") } if len(c.Inputs) > 1 { - return fmt.Errorf("only 1 fleet-server input can be defined") + return errors.New("only 1 fleet-server input can be defined") } return nil } -// LoadFile take a path and load the file and return a new configuration. -func LoadFile(path string) (*Config, error) { +// Merge merges two configurations together. +func (c *Config) Merge(other *Config) (*Config, error) { + repr, err := ucfg.NewFrom(c, DefaultOptions...) + if err != nil { + return nil, err + } + err = repr.Merge(other, DefaultOptions...) + if err != nil { + return nil, err + } cfg := &Config{} - c, err := yaml.NewConfigWithFile(path, DefaultOptions...) + err = repr.Unpack(cfg, DefaultOptions...) if err != nil { return nil, err } - err = c.Unpack(cfg, DefaultOptions...) + return cfg, nil +} + +// FromConfig returns Config from the ucfg.Config. +func FromConfig(c *ucfg.Config) (*Config, error) { + cfg := &Config{} + err := c.Unpack(cfg, DefaultOptions...) if err != nil { return nil, err } return cfg, nil } + +// LoadFile take a path and load the file and return a new configuration. +func LoadFile(path string) (*Config, error) { + c, err := yaml.NewConfigWithFile(path, DefaultOptions...) + if err != nil { + return nil, err + } + return FromConfig(c) +} + +// Flag captures key/values pairs into an ucfg.Config object. +type Flag flag.FlagValue + +// NewFlag creates an instance that allows the `-E` flag to overwrite +// the configuration from the command-line. +func NewFlag() *Flag { + opts := append( + []ucfg.Option{ + ucfg.MetaData(ucfg.Meta{Source: "command line flag"}), + }, + DefaultOptions..., + ) + + tmp := flag.NewFlagKeyValue(ucfg.New(), true, opts...) + return (*Flag)(tmp) +} + +func (f *Flag) access() *flag.FlagValue { + return (*flag.FlagValue)(f) +} + +// Config returns the config object the Flag stores applied settings to. +func (f *Flag) Config() *ucfg.Config { + return f.access().Config() +} + +// Set sets a settings value in the Config object. The input string must be a +// key-value pair like `key=value`. If the value is missing, the value is set +// to the boolean value `true`. 
+func (f *Flag) Set(s string) error { + return f.access().Set(s) +} + +// Get returns the Config object used to store values. +func (f *Flag) Get() interface{} { + return f.Config() +} + +// String always returns an empty string. It is required to fulfil +// the flag.Value interface. +func (f *Flag) String() string { + return "" +} + +// Type reports the type of contents (setting=value) expected to be parsed by Set. +// It is used to build the CLI usage string. +func (f *Flag) Type() string { + return "setting=value" +} diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index ad0539a10..6e9ed874d 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -26,10 +26,8 @@ func TestConfig(t *testing.T) { cfg: &Config{ Fleet: Fleet{ Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, + ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", + Logging: AgentLogging{}, }, }, Output: Output{ @@ -62,6 +60,12 @@ func TestConfig(t *testing.T) { }, }, }, + Logging: Logging{ + Level: "info", + ToStderr: false, + ToFiles: true, + Files: nil, + }, }, }, "fleet-logging": { @@ -104,16 +108,20 @@ func TestConfig(t *testing.T) { }, }, }, + Logging: Logging{ + Level: "info", + ToStderr: false, + ToFiles: true, + Files: nil, + }, }, }, "input": { cfg: &Config{ Fleet: Fleet{ Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, + ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", + Logging: AgentLogging{}, }, }, Output: Output{ @@ -146,16 +154,20 @@ func TestConfig(t *testing.T) { }, }, }, + Logging: Logging{ + Level: "info", + ToStderr: false, + ToFiles: true, + Files: nil, + }, }, }, "input-config": { cfg: &Config{ Fleet: Fleet{ Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{ - Level: "info", - }, + ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", + Logging: AgentLogging{}, }, }, Output: Output{ @@ -188,6 +200,12 @@ func TestConfig(t *testing.T) { }, }, }, + Logging: Logging{ + Level: "info", + ToStderr: false, + ToFiles: true, + Files: nil, + }, }, }, "bad-input": { @@ -202,12 +220,6 @@ func TestConfig(t *testing.T) { "bad-output": { err: "can only contain elasticsearch key", }, - "bad-no-output": { - err: "cannot connect to elasticsearch without username/password", - }, - "bad-no-agent-id": { - err: "string value is not set", - }, } for name, test := range testcases { diff --git a/internal/pkg/config/fleet.go b/internal/pkg/config/fleet.go index a3fd7e502..7f17afbe3 100644 --- a/internal/pkg/config/fleet.go +++ b/internal/pkg/config/fleet.go @@ -15,13 +15,13 @@ type AgentLogging struct { Level string `config:"level"` } -// InitDefaults initializes the defaults for the configuration. -func (c *AgentLogging) InitDefaults() { - c.Level = "info" -} - // Validate ensures that the configuration is valid. func (c *AgentLogging) Validate() error { + if c.Level == "" { + // allowed to be empty because `agent.logging.level` is only + // an override of the logging level from `logging.level` + return nil + } if _, err := strToLevel(c.Level); err != nil { return err } @@ -36,7 +36,7 @@ func (c *AgentLogging) LogLevel() zerolog.Level { // Agent is the ID and logging configuration of the Agent running this Fleet Server. 
type Agent struct { - ID string `config:"id" validate:"required"` + ID string `config:"id"` Version string `config:"version"` Logging AgentLogging `config:"logging"` } diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 609e97df8..d1c84f641 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -10,6 +10,11 @@ import ( "time" ) +// Policy is the configuration policy to use. +type Policy struct { + ID string `config:"id"` +} + // ServerTimeouts is the configuration for the server timeouts type ServerTimeouts struct { Read time.Duration `config:"read"` @@ -75,6 +80,7 @@ func (c *Server) BindAddress() string { // Input is the input defined by Agent to run Fleet Server. type Input struct { Type string `config:"type"` + Policy Policy `config:"policy"` Server Server `config:"server"` } diff --git a/internal/pkg/config/logging.go b/internal/pkg/config/logging.go new file mode 100644 index 000000000..12ac2b03a --- /dev/null +++ b/internal/pkg/config/logging.go @@ -0,0 +1,68 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import ( + "github.com/rs/zerolog" + "os" + "time" +) + +// LoggingFiles configuration for the logging file output. +type LoggingFiles struct { + Path string `config:"path"` + Name string `config:"name"` + MaxSize uint `config:"rotateeverybytes" validate:"min=1"` + MaxBackups uint `config:"keepfiles" validate:"max=1024"` + Permissions uint32 `config:"permissions"` + Interval time.Duration `config:"interval"` + RotateOnStartup bool `config:"rotateonstartup"` + RedirectStderr bool `config:"redirect_stderr"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *LoggingFiles) InitDefaults() { + cwd, err := os.Getwd() + if err != nil { + // something really wrong here + panic(err) + } + + c.Path = cwd + c.Name = "fleet-server.log" + c.MaxSize = 10 * 1024 * 1024 + c.MaxBackups = 7 + c.Permissions = 0600 + c.Interval = 0 + c.RotateOnStartup = true +} + +// Logging configuration. +type Logging struct { + Level string `config:"level"` + ToStderr bool `config:"to_stderr"` + ToFiles bool `config:"to_files"` + Files *LoggingFiles `config:"files"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *Logging) InitDefaults() { + c.Level = "info" + c.ToFiles = true +} + +// Validate ensures that the configuration is valid. 
+func (c *Logging) Validate() error { + if _, err := strToLevel(c.Level); err != nil { + return err + } + return nil +} + +// LogLevel returns configured zerolog.Level +func (c *Logging) LogLevel() zerolog.Level { + l, _ := strToLevel(c.Level) + return l +} diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index c17c19336..759a912ab 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -55,9 +55,6 @@ func (c *Elasticsearch) Validate() error { if c.APIKey != "" { return fmt.Errorf("cannot connect to elasticsearch with api_key; must use username/password") } - if c.Username == "" || c.Password == "" { - return fmt.Errorf("cannot connect to elasticsearch without username/password") - } if c.ProxyURL != "" && !c.ProxyDisable { if _, err := common.ParseURL(c.ProxyURL); err != nil { return err diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 6544610e0..239e6ef8d 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -89,13 +89,17 @@ func NewMonitor(fleet config.Fleet, version string, bulker bulk.Bulk, monitor mo // Run runs the monitor. func (m *monitorT) Run(ctx context.Context) (err error) { - m.log.Info().Msg("start") - defer func() { - m.log.Info().Err(err).Msg("exited") - }() + // When ID of the Agent is not provided to Fleet Server then the Agent + // has not enrolled. The Fleet Server cannot become a leader until the + // Agent it is running under has been enrolled. + m.calcMetadata() + if m.agentMetadata.Id == "" { + m.log.Warn().Msg("missing config fleet.agent.id; acceptable until Elastic Agent has enrolled") + <-ctx.Done() + return ctx.Err() + } // Ensure leadership on startup - m.calcMetadata() err = m.ensureLeadership(ctx) if err != nil { return err diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index a84f79716..3e30280c5 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -6,81 +6,146 @@ package logger import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/reload" + "io/ioutil" "os" - "strings" + "path/filepath" "sync" "time" + "github.com/elastic/beats/v7/libbeat/common/file" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/config" ) -const ( - kPrettyTimeFormat = "15:04:05.000000" -) - var once sync.Once -var gLogger *logger - -func strToLevel(s string) zerolog.Level { - l := zerolog.DebugLevel - - s = strings.ToLower(s) - switch strings.TrimSpace(s) { - case "trace": - l = zerolog.TraceLevel - case "debug": - l = zerolog.DebugLevel - case "info": - l = zerolog.InfoLevel - case "warn": - l = zerolog.WarnLevel - case "error": - l = zerolog.ErrorLevel - case "fatal": - l = zerolog.FatalLevel - case "panic": - l = zerolog.PanicLevel - } +var gLogger *Logger - return l +// WriterSync implements a Sync function. +type WriterSync interface { + // Sync syncs the logger to its output. + Sync() error } -type logger struct { - cfg *config.Config +// Logger for the Fleet Server. +type Logger struct { + cfg *config.Config + sync WriterSync } // Reload reloads the logger configuration. 
-func (l *logger) Reload(_ context.Context, cfg *config.Config) error { - if l.cfg.Fleet.Agent.Logging != cfg.Fleet.Agent.Logging { - // reload the logger to new config level - log.Logger = log.Output(os.Stdout).Level(cfg.Fleet.Agent.Logging.LogLevel()) +func (l *Logger) Reload(_ context.Context, cfg *config.Config) error { + if changed(l.cfg, cfg) { + // sync before reload + l.Sync() + + // reload the logger + logger, w, err := configure(cfg) + if err != nil { + return err + } + log.Logger = logger + l.sync = w } l.cfg = cfg return nil } +// Sync syncs the logger to its output. +func (l *Logger) Sync() { + if l.sync != nil { + l.sync.Sync() + } +} + // Init initializes the logger. -func Init(cfg *config.Config) reload.Reloadable { +func Init(cfg *config.Config) (*Logger, error) { + var err error once.Do(func() { - gLogger = &logger{ - cfg: cfg, + var l zerolog.Logger + var w WriterSync + l, w, err = configure(cfg) + if err != nil { + return + } + + log.Logger = l + gLogger = &Logger{ + cfg: cfg, + sync: w, } zerolog.TimeFieldFormat = time.StampMicro - log.Logger = log.Output(os.Stdout).Level(cfg.Fleet.Agent.Logging.LogLevel()) log.Info(). Int("pid", os.Getpid()). Int("ppid", os.Getppid()). Str("exe", os.Args[0]). Strs("args", os.Args[1:]). Msg("boot") - log.Debug().Strs("env", os.Environ()).Msg("environment") }) - return gLogger + return gLogger, err +} + +func changed(a *config.Config, b *config.Config) bool { + if a.Fleet.Agent.Logging != b.Fleet.Agent.Logging { + return true + } + al := a.Logging + aFiles := al.Files + al.Files = nil + bl := b.Logging + bFiles := bl.Files + bl.Files = nil + if al != bl { + return true + } + if (aFiles == nil && bFiles != nil) || (aFiles != nil && bFiles == nil) || (*aFiles != *bFiles) { + return true + } + return false +} + +func level(cfg *config.Config) zerolog.Level { + if cfg.Fleet.Agent.Logging.Level != "" { + return cfg.Fleet.Agent.Logging.LogLevel() + } + return cfg.Logging.LogLevel() +} + +func configure(cfg *config.Config) (zerolog.Logger, WriterSync, error) { + if cfg.Logging.ToStderr { + return log.Output(os.Stderr).Level(level(cfg)), os.Stderr, nil + } + if cfg.Logging.ToFiles { + files := cfg.Logging.Files + if files == nil { + files = &config.LoggingFiles{} + files.InitDefaults() + } + filename := filepath.Join(files.Path, files.Name) + rotator, err := file.NewFileRotator(filename, + file.MaxSizeBytes(files.MaxSize), + file.MaxBackups(files.MaxBackups), + file.Permissions(os.FileMode(files.Permissions)), + file.Interval(files.Interval), + file.RotateOnStartup(files.RotateOnStartup), + file.RedirectStderr(files.RedirectStderr), + ) + if err != nil { + return zerolog.Logger{}, nil, err + } + return log.Output(rotator).Level(level(cfg)), rotator, nil + } + return log.Output(ioutil.Discard).Level(level(cfg)), &nopSync{}, nil +} + +type nopSync struct { +} + +// Sync does nothing. +func (*nopSync) Sync() error { + return nil } diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 2c09836a0..52724a231 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -80,7 +80,7 @@ func (s *subT) Output() <-chan model.Policy { // NewMonitor creates the policy monitor for subscribing agents. 
func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duration) Monitor { return &monitorT{ - log: log.With().Str("ctx", "policy agent manager").Logger(), + log: log.With().Str("ctx", "policy agent monitor").Logger(), bulker: bulker, monitor: monitor, kickCh: make(chan struct{}, 1), diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go new file mode 100644 index 000000000..64aa8c7a6 --- /dev/null +++ b/internal/pkg/policy/self.go @@ -0,0 +1,202 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package policy + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "net/http" + "sync" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/status" +) + +type SelfMonitor interface { + // Run runs the monitor. + Run(ctx context.Context) error +} + +type selfMonitorT struct { + log zerolog.Logger + + mut sync.Mutex + fleet config.Fleet + bulker bulk.Bulk + monitor monitor.Monitor + + policyId string + reporter status.Reporter + + policy *model.Policy + + policyF policyFetcher + policiesIndex string +} + +// NewSelfMonitor creates the self policy monitor. +// +// Ensures that the policy that this Fleet Server attached to exists and that it +// has a Fleet Server input defined. +func NewSelfMonitor(fleet config.Fleet, bulker bulk.Bulk, monitor monitor.Monitor, policyId string, reporter status.Reporter) SelfMonitor { + return &selfMonitorT{ + log: log.With().Str("ctx", "policy self monitor").Logger(), + fleet: fleet, + bulker: bulker, + monitor: monitor, + policyId: policyId, + reporter: reporter, + policyF: dl.QueryLatestPolicies, + policiesIndex: dl.FleetPolicies, + } +} + +// Run runs the monitor. 
+func (m *selfMonitorT) Run(ctx context.Context) error { + s := m.monitor.Subscribe() + defer m.monitor.Unsubscribe(s) + + err := m.process(ctx) + if err != nil { + return err + } + +LOOP: + for { + select { + case <-ctx.Done(): + break LOOP + case hits := <-s.Output(): + policies := make([]model.Policy, len(hits)) + for i, hit := range hits { + err := hit.Unmarshal(&policies[i]) + if err != nil { + return err + } + } + if err := m.processPolicies(ctx, policies); err != nil { + return err + } + } + } + + return nil +} + +func (m *selfMonitorT) process(ctx context.Context) error { + policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) + if err != nil { + elasticErr, ok := err.(*es.ErrElastic) + if !ok { + return err + } + if elasticErr.Status != http.StatusNotFound { + return err + } + } + if len(policies) == 0 { + m.updateStatus() + return nil + } + return m.processPolicies(ctx, policies) +} + +func (m *selfMonitorT) processPolicies(ctx context.Context, policies []model.Policy) error { + if len(policies) == 0 { + // nothing to do + return nil + } + latest := m.groupByLatest(policies) + for _, policy := range latest { + if m.policyId != "" && policy.PolicyId == m.policyId { + m.policy = &policy + break + } else if m.policyId == "" && policy.DefaultFleetServer { + m.policy = &policy + break + } + } + return m.updateStatus() +} + +func (m *selfMonitorT) groupByLatest(policies []model.Policy) map[string]model.Policy { + latest := make(map[string]model.Policy) + for _, policy := range policies { + curr, ok := latest[policy.PolicyId] + if !ok { + latest[policy.PolicyId] = policy + continue + } + if policy.RevisionIdx > curr.RevisionIdx { + latest[policy.PolicyId] = policy + continue + } else if policy.RevisionIdx == curr.RevisionIdx && policy.CoordinatorIdx > curr.CoordinatorIdx { + latest[policy.PolicyId] = policy + } + } + return latest +} + +func (m *selfMonitorT) updateStatus() error { + if m.policy == nil { + // no policy found + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on default policy with Fleet Server integration", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", m.policyId), nil) + } + return nil + } + + var data policyData + err := json.Unmarshal(m.policy.Data, &data) + if err != nil { + return err + } + if !data.HasType("fleet-server") { + return errors.New("assigned policy does not have fleet-server input") + } + + status := proto.StateObserved_HEALTHY + extendMsg := "" + if m.fleet.Agent.ID == "" { + status = proto.StateObserved_DEGRADED + extendMsg = "; missing config fleet.agent.id" + } + if m.policyId == "" { + m.reporter.Status(status, fmt.Sprintf("Running on default policy with Fleet Server integration%s", extendMsg), nil) + } else { + m.reporter.Status(status, fmt.Sprintf("Running on policy with Fleet Server integration: %s%s", m.policyId, extendMsg), nil) + } + return nil +} + +type policyData struct { + Inputs []policyInput `json:"inputs"` +} + +type policyInput struct { + Type string `json:"type"` +} + +func (d *policyData) HasType(val string) bool { + for _, input := range d.Inputs { + if input.Type == val { + return true + } + } + return false +} diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go new file mode 100644 index 000000000..33df692dc --- /dev/null +++ b/internal/pkg/policy/self_test.go @@ -0,0 +1,426 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build !integration + +package policy + +import ( + "context" + "encoding/json" + "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "sync" + "testing" + "time" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/gofrs/uuid" + "github.com/rs/xid" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/monitor/mock" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" +) + +func TestSelfMonitor_DefaultPolicy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "agent-id", + }, + } + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, "", reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting") + } + if msg != "Waiting on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + policyId := uuid.Must(uuid.NewV4()).String() + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + }() + + // should now be set to healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy") + } + if msg != "Running on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "", + }, + } + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, "", reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, 
error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting") + } + if msg != "Waiting on default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + policyId := uuid.Must(uuid.NewV4()).String() + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + }() + + // should now be set to healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as healthy") + } + if msg != "Running on default policy with Fleet Server integration; missing config fleet.agent.id" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +func TestSelfMonitor_SpecificPolicy(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "agent-id", + }, + } + policyId := uuid.Must(uuid.NewV4()).String() + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, policyId, reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting") + } + if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + }() + + // should now be set to healthy + 
ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy") + } + if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + cfg := config.Fleet{ + Agent: config.Agent{ + ID: "", + }, + } + policyId := uuid.Must(uuid.NewV4()).String() + reporter := &FakeReporter{} + bulker := ftesting.MockBulk{} + mm := mock.NewMockIndexMonitor() + monitor := NewSelfMonitor(cfg, bulker, mm, policyId, reporter) + sm := monitor.(*selfMonitorT) + sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { + return []model.Policy{}, nil + } + + var merr error + var mwg sync.WaitGroup + mwg.Add(1) + go func() { + defer mwg.Done() + merr = monitor.Run(ctx) + }() + + // should be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting") + } + if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId := xid.New().String() + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + { + Type: "fleet-server", + }, + }}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + policyData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: policyData, + }, + }) + }() + + // should now be set to healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as healthy") + } + if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s; missing config fleet.agent.id", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }) + + cancel() + mwg.Wait() + if merr != nil && merr != context.Canceled { + t.Fatal(merr) + } +} + +type FakeReporter struct { + lock sync.Mutex + status proto.StateObserved_Status + msg string + payload map[string]interface{} +} + +func (r *FakeReporter) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error { + r.lock.Lock() + defer r.lock.Unlock() + r.status = status + r.msg = message + r.payload = payload + return nil +} + +func (r *FakeReporter) Current() (proto.StateObserved_Status, string, map[string]interface{}) { + r.lock.Lock() + defer r.lock.Unlock() + return r.status, r.msg, r.payload +} diff --git a/internal/pkg/status/reporter.go b/internal/pkg/status/reporter.go new file mode 100644 index 000000000..eaa3bb405 --- /dev/null +++ b/internal/pkg/status/reporter.go @@ -0,0 +1,53 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package status
+
+import (
+	"github.com/rs/zerolog/log"
+
+	"github.com/elastic/elastic-agent-client/v7/pkg/proto"
+)
+
+// Reporter is interface that reports updated status on.
+type Reporter interface {
+	// Status triggers updating the status.
+	Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error
+}
+
+// Log logs the reported status.
+type Log struct{}
+
+// NewLog creates a LogStatus.
+func NewLog() *Log {
+	return &Log{}
+}
+
+// Status triggers updating the status.
+func (l *Log) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error {
+	log.Info().Str("status", status.String()).Fields(map[string]interface{}{
+		"payload": payload,
+	}).Msg(message)
+	return nil
+}
+
+// Chained calls Status on all the provided reporters in the provided order.
+type Chained struct {
+	reporters []Reporter
+}
+
+// NewChained creates a Chained with provided reporters.
+func NewChained(reporters ...Reporter) *Chained {
+	return &Chained{reporters}
+}
+
+// Status triggers updating the status.
+func (l *Chained) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error {
+	for _, reporter := range l.reporters {
+		if err := reporter.Status(status, message, payload); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/internal/pkg/testing/retry.go b/internal/pkg/testing/retry.go
index 4814aa914..c02b29128 100644
--- a/internal/pkg/testing/retry.go
+++ b/internal/pkg/testing/retry.go
@@ -52,9 +52,7 @@ func Retry(t *testing.T, ctx context.Context, f RetryFunc, opts ...RetryOption)
 		if err == nil {
 			return
 		}
-		if err = sleep.WithContext(ctx, o.sleep); err != nil {
-			break
-		}
+		sleep.WithContext(ctx, o.sleep)
 	}
 	t.Fatal(err)
 }

From 5775b0535b004b1d7526eaf962aede6fd1d314b3 Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Wed, 20 Jan 2021 10:42:14 -0500
Subject: [PATCH 004/240] Make fleet server components resilient if the indices do not exist

---
 internal/pkg/coordinator/monitor.go | 13 +++++++++++--
 internal/pkg/esboot/bootstrap.go    | 17 +++++++++--------
 internal/pkg/migrate/migrate.go     | 14 ++++++++++++--
 internal/pkg/monitor/monitor.go     | 10 +++++++++-
 internal/pkg/policy/monitor.go      |  6 ++++++
 5 files changed, 47 insertions(+), 13 deletions(-)

diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go
index 239e6ef8d..e88c82c07 100644
--- a/internal/pkg/coordinator/monitor.go
+++ b/internal/pkg/coordinator/monitor.go
@@ -6,6 +6,7 @@ package coordinator
 
 import (
 	"context"
+	"errors"
 	"net"
 	"os"
 	"runtime"
@@ -188,7 +189,11 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error {
 	leaders := map[string]model.PolicyLeader{}
 	policies, err := dl.QueryLatestPolicies(ctx, m.bulker, dl.WithIndexName(m.policiesIndex))
 	if err != nil {
-		return err
+		if errors.Is(err, es.ErrIndexNotFound) {
+			err = nil
+		} else {
+			return err
+		}
 	}
 	if len(policies) > 0 {
 		ids := make([]string, len(policies))
@@ -197,7 +202,11 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error {
 		}
 		leaders, err = dl.SearchPolicyLeaders(ctx, m.bulker, ids, dl.WithIndexName(m.leadersIndex))
 		if err != nil {
-			return err
+			if errors.Is(err, es.ErrIndexNotFound) {
+				err = nil
+			} else {
+				return err
+			}
 		}
 	}
 
diff --git a/internal/pkg/esboot/bootstrap.go
b/internal/pkg/esboot/bootstrap.go index 614d500d6..c000f965d 100644 --- a/internal/pkg/esboot/bootstrap.go +++ b/internal/pkg/esboot/bootstrap.go @@ -6,7 +6,6 @@ package esboot import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/go-elasticsearch/v8" ) @@ -20,13 +19,15 @@ type indexConfig struct { } var indexConfigs = map[string]indexConfig{ - ".fleet-actions": {mapping: es.MappingAction}, - ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, - ".fleet-agents": {mapping: es.MappingAgent}, - ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, - ".fleet-policies": {mapping: es.MappingPolicy}, - ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, - ".fleet-servers": {mapping: es.MappingServer}, + // Commenting out the boostrapping for now here, just in case if it needs to be "enabled" again. + // Will remove all the boostrapping code completely later once all is fully integrated + // ".fleet-actions": {mapping: es.MappingAction}, + // ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, + // ".fleet-agents": {mapping: es.MappingAgent}, + // ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, + // ".fleet-policies": {mapping: es.MappingPolicy}, + // ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, + // ".fleet-servers": {mapping: es.MappingServer}, } // Bootstrap creates .fleet-actions data stream diff --git a/internal/pkg/migrate/migrate.go b/internal/pkg/migrate/migrate.go index dbcc8cb83..4eed7d7ed 100644 --- a/internal/pkg/migrate/migrate.go +++ b/internal/pkg/migrate/migrate.go @@ -7,8 +7,11 @@ package migrate import ( "context" "encoding/json" + "errors" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/saved" ) @@ -42,12 +45,19 @@ func MigrateEnrollmentAPIKeys(ctx context.Context, sv saved.CRUD, bulker bulk.Bu } var recs []model.EnrollmentApiKey + var resHits []es.HitT res, err := bulker.Search(ctx, []string{dl.FleetEnrollmentAPIKeys}, raw, bulk.WithRefresh()) if err != nil { - return err + if errors.Is(err, es.ErrIndexNotFound) { + err = nil + } else { + return err + } + } else { + resHits = res.Hits } - for _, hit := range res.Hits { + for _, hit := range resHits { var rec model.EnrollmentApiKey err := json.Unmarshal(hit.Source, &rec) if err != nil { diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 78df63db9..a6f28936b 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -8,6 +8,7 @@ import ( "bytes" "context" "encoding/json" + "errors" "sync/atomic" "time" @@ -291,7 +292,14 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ } if res.IsError() { - return nil, es.TranslateError(res.StatusCode, esres.Error) + err = es.TranslateError(res.StatusCode, esres.Error) + } + + if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + return nil, nil + } + return nil, err } return esres.Hits.Hits, nil diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 52724a231..9e2b10635 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" 
"github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" ) @@ -125,6 +126,11 @@ LOOP: func (m *monitorT) process(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + err = nil + } else { + err = nil + } return err } if len(policies) == 0 { From 06a7d379439cc3f62ff8cedbca86d2ded4b2756b Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Thu, 21 Jan 2021 10:14:12 -0500 Subject: [PATCH 005/240] Address code review comments --- cmd/fleet/main.go | 2 +- internal/pkg/coordinator/monitor.go | 12 ++++++------ internal/pkg/migrate/migrate.go | 9 ++++++--- internal/pkg/monitor/monitor.go | 1 + internal/pkg/policy/monitor.go | 5 ++--- 5 files changed, 16 insertions(+), 13 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index a197ec178..be73843f0 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -478,7 +478,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er if err != nil { return err } - err = migrate.Migrate(ctx, sv, bulker) + err = migrate.Migrate(ctx, log.Logger, sv, bulker) if err != nil { return err } diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index e88c82c07..272d76f2d 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -190,10 +190,10 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { policies, err := dl.QueryLatestPolicies(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { - err = nil - } else { - return err + m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) + return nil } + return err } if len(policies) > 0 { ids := make([]string, len(policies)) @@ -203,10 +203,10 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { leaders, err = dl.SearchPolicyLeaders(ctx, m.bulker, ids, dl.WithIndexName(m.leadersIndex)) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { - err = nil - } else { - return err + m.log.Debug().Str("index", m.leadersIndex).Msg(es.ErrIndexNotFound.Error()) + return nil } + return err } } diff --git a/internal/pkg/migrate/migrate.go b/internal/pkg/migrate/migrate.go index 4eed7d7ed..f18a4bde0 100644 --- a/internal/pkg/migrate/migrate.go +++ b/internal/pkg/migrate/migrate.go @@ -14,6 +14,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/saved" + "github.com/rs/zerolog" ) type enrollmentApiKey struct { @@ -32,11 +33,11 @@ type enrollmentApiKey struct { // This is for development only (1 instance of fleet) // Not safe for multiple instances of fleet // Initially needed to migrate the enrollment-api-keys that kibana creates -func Migrate(ctx context.Context, sv saved.CRUD, bulker bulk.Bulk) error { - return MigrateEnrollmentAPIKeys(ctx, sv, bulker) +func Migrate(ctx context.Context, log zerolog.Logger, sv saved.CRUD, bulker bulk.Bulk) error { + return MigrateEnrollmentAPIKeys(ctx, log, sv, bulker) } -func MigrateEnrollmentAPIKeys(ctx context.Context, sv saved.CRUD, bulker bulk.Bulk) error { +func MigrateEnrollmentAPIKeys(ctx context.Context, log zerolog.Logger, sv saved.CRUD, bulker bulk.Bulk) error { // 
Query all enrollment keys from the new schema raw, err := dl.RenderAllEnrollmentAPIKeysQuery(1000) @@ -49,6 +50,8 @@ func MigrateEnrollmentAPIKeys(ctx context.Context, sv saved.CRUD, bulker bulk.Bu res, err := bulker.Search(ctx, []string{dl.FleetEnrollmentAPIKeys}, raw, bulk.WithRefresh()) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { + log.Debug().Str("index", dl.FleetEnrollmentAPIKeys).Msg(es.ErrIndexNotFound.Error()) + // Continue with migration if the .fleet-enrollment-api-keys index is not found err = nil } else { return err diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index a6f28936b..bdea51fce 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -297,6 +297,7 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ if err != nil { if errors.Is(err, es.ErrIndexNotFound) { + m.log.Debug().Str("index", m.index).Msg(es.ErrIndexNotFound.Error()) return nil, nil } return nil, err diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 9e2b10635..e647bb612 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -127,9 +127,8 @@ func (m *monitorT) process(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { - err = nil - } else { - err = nil + m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) + return nil } return err } From fe4fdc5a96a3b82bb1879a9a29a986591c5ddb0d Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Mon, 25 Jan 2021 13:45:33 -0500 Subject: [PATCH 006/240] Hide old bootstrap logic behind FLEET_ES_BOOT environment variable --- cmd/fleet/main.go | 21 ++++++++++++++------- internal/pkg/esboot/bootstrap.go | 15 ++++++++------- 2 files changed, 22 insertions(+), 14 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index be73843f0..730ab9f9e 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -474,13 +474,20 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er // Initial indices bootstrapping, needed for agents actions development // TODO: remove this after the indices bootstrapping logic implemented in ES plugin - err = esboot.EnsureESIndices(ctx, es) - if err != nil { - return err - } - err = migrate.Migrate(ctx, log.Logger, sv, bulker) - if err != nil { - return err + bootFlag := env.GetStr( + "FLEET_ES_BOOT", + "", + ) + if bootFlag == "1" { + log.Debug().Msg("FLEET_ES_BOOT is set to true, perform bootstrap") + err = esboot.EnsureESIndices(ctx, es) + if err != nil { + return err + } + err = migrate.Migrate(ctx, log.Logger, sv, bulker) + if err != nil { + return err + } } // Replacing to errgroup context diff --git a/internal/pkg/esboot/bootstrap.go b/internal/pkg/esboot/bootstrap.go index c000f965d..9906a8f52 100644 --- a/internal/pkg/esboot/bootstrap.go +++ b/internal/pkg/esboot/bootstrap.go @@ -7,6 +7,7 @@ package esboot import ( "context" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/go-elasticsearch/v8" ) @@ -21,13 +22,13 @@ type indexConfig struct { var indexConfigs = map[string]indexConfig{ // Commenting out the boostrapping for now here, just in case if it needs to be "enabled" again. 
// Will remove all the boostrapping code completely later once all is fully integrated - // ".fleet-actions": {mapping: es.MappingAction}, - // ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, - // ".fleet-agents": {mapping: es.MappingAgent}, - // ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, - // ".fleet-policies": {mapping: es.MappingPolicy}, - // ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, - // ".fleet-servers": {mapping: es.MappingServer}, + ".fleet-actions": {mapping: es.MappingAction}, + ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, + ".fleet-agents": {mapping: es.MappingAgent}, + ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, + ".fleet-policies": {mapping: es.MappingPolicy}, + ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, + ".fleet-servers": {mapping: es.MappingServer}, } // Bootstrap creates .fleet-actions data stream From 3951f20a9fcecef8170f392bdb35dc6dc88a522e Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 27 Jan 2021 16:22:19 -0500 Subject: [PATCH 007/240] Rename action input_id to input_type --- cmd/fleet/handleCheckin.go | 2 +- cmd/fleet/schema.go | 2 +- internal/pkg/es/mapping.go | 2 +- internal/pkg/model/schema.go | 4 ++-- internal/pkg/testing/actions.go | 7 ++++--- model/schema.json | 4 ++-- 6 files changed, 11 insertions(+), 10 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 10c7a74a5..f074eedac 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -246,7 +246,7 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin Data: []byte(action.Data), Id: action.ActionId, Type: action.Type, - InputId: action.InputId, + InputType: action.InputType, }) } diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 77371b6c4..dc2de3b5c 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -123,7 +123,7 @@ type ActionResp struct { Data json.RawMessage `json:"data"` Id string `json:"id"` Type string `json:"type"` - InputId string `json:"input_id"` + InputType string `json:"input_type"` } type Event struct { diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index dadae8870..91dd14e9a 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -24,7 +24,7 @@ const ( "expiration": { "type": "date" }, - "input_id": { + "input_type": { "type": "keyword" }, "@timestamp": { diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 8f66bee5e..1d9c7e101 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -45,8 +45,8 @@ type Action struct { // The action expiration date/time Expiration string `json:"expiration,omitempty"` - // The input identifier the actions should be routed to. - InputId string `json:"input_id,omitempty"` + // The input type the actions should be routed to. 
+ InputType string `json:"input_type,omitempty"` // Date/time the action was created Timestamp string `json:"@timestamp,omitempty"` diff --git a/internal/pkg/testing/actions.go b/internal/pkg/testing/actions.go index 2bea9a4dc..63e66dd87 100644 --- a/internal/pkg/testing/actions.go +++ b/internal/pkg/testing/actions.go @@ -9,12 +9,13 @@ package testing import ( "context" "encoding/json" + "testing" + "time" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/rnd" - "testing" - "time" "github.com/gofrs/uuid" "github.com/rs/xid" @@ -60,7 +61,7 @@ func CreateRandomActions(min, max int) ([]model.Action, error) { Timestamp: r.Time(now, 2, 5, time.Second, rnd.TimeBefore).Format(time.RFC3339), Expiration: r.Time(now, 12, 25, time.Minute, rnd.TimeAfter).Format(time.RFC3339), Type: "APP_ACTION", - InputId: "osquery", + InputType: "osquery", Agents: aid, Data: data, } diff --git a/model/schema.json b/model/schema.json index 686c23124..caa3254ca 100644 --- a/model/schema.json +++ b/model/schema.json @@ -33,8 +33,8 @@ "description": "The action type. APP_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.", "type": "string" }, - "input_id": { - "description": "The input identifier the actions should be routed to.", + "input_type": { + "description": "The input type the actions should be routed to.", "type": "string" }, "agents": { From b16f636c9e8c89a2c763ce07f85e91b165563ed2 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 2 Feb 2021 10:10:02 -0500 Subject: [PATCH 008/240] Add .exe extension to fleet-server windows artifacts. (#78) (#79) (cherry picked from commit 376dedd078e219bb385fc442d92fb1f7ffdcb30e) --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 525c51788..eed30f208 100644 --- a/Makefile +++ b/Makefile @@ -114,6 +114,7 @@ $(PLATFORM_TARGETS): release-%: .PHONY: package-target package-target: build/distributions ifeq ($(OS),windows) + @mv build/binaries/fleet-server-$(VERSION)-$(OS)-$(ARCH)/fleet-server build/binaries/fleet-server-$(VERSION)-$(OS)-$(ARCH)/fleet-server.exe @cd build/binaries && zip -q -r ../distributions/fleet-server-$(VERSION)-$(OS)-$(ARCH).zip fleet-server-$(VERSION)-$(OS)-$(ARCH) @cd build/distributions && shasum -a 512 fleet-server-$(VERSION)-$(OS)-$(ARCH).zip > fleet-server-$(VERSION)-$(OS)-$(ARCH).zip.sha512 else From f0912adc69e1ca1829dddbd4964dbde036f14242 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Tue, 2 Feb 2021 12:07:35 -0500 Subject: [PATCH 009/240] Fix permissions on dependencies-report script (#80) (#81) This commit is adding execution permission to dependencies-report script which are required during the unified release build. 
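Regarding the input_id -> input_type rename in PATCH 007 above: actions are now routed to an integration by its input type rather than an input id. A rough sketch of how an action might be constructed from code inside this repository, loosely following the test helper in internal/pkg/testing/actions.go; the agent id handling, the 30-minute expiration, and the assumption that the Agents field is a plain string slice are illustrative, not taken from the patch.

package example // illustrative sketch, not part of the patch series

import (
	"time"

	"github.com/gofrs/uuid"

	"github.com/elastic/fleet-server/v7/internal/pkg/model"
)

// newOsqueryAction sketches an action targeted at the osquery input of a single
// agent; field names follow the model.Action struct shown in the diffs above.
func newOsqueryAction(agentId string) model.Action {
	return model.Action{
		ActionId:   uuid.Must(uuid.NewV4()).String(),
		Timestamp:  time.Now().UTC().Format(time.RFC3339),
		Expiration: time.Now().Add(30 * time.Minute).UTC().Format(time.RFC3339),
		Type:       "APP_ACTION",
		InputType:  "osquery", // previously input_id
		Agents:     []string{agentId}, // assumed to be a string slice
	}
}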
(cherry picked from commit 19fd842ccd5d49f47694e7c2b17ea9d840976839)

Co-authored-by: Julien Mailleret <8582351+jmlrt@users.noreply.github.com>
---
 dev-tools/dependencies-report | 0
 1 file changed, 0 insertions(+), 0 deletions(-)
 mode change 100644 => 100755 dev-tools/dependencies-report

diff --git a/dev-tools/dependencies-report b/dev-tools/dependencies-report
old mode 100644
new mode 100755

From dbbcd69c35d3a11cd90614c024d4c223b5194fa9 Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Tue, 2 Feb 2021 17:46:30 -0500
Subject: [PATCH 010/240] Reduce actions fetching interval if the full page of action documents was fetched

---
 internal/pkg/model/schema.go    |  2 +-
 internal/pkg/monitor/monitor.go | 28 ++++++++++++++++++++++------
 model/schema.json               |  2 +-
 3 files changed, 24 insertions(+), 8 deletions(-)

diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go
index 1d9c7e101..b4016fd60 100644
--- a/internal/pkg/model/schema.go
+++ b/internal/pkg/model/schema.go
@@ -51,7 +51,7 @@ type Action struct {
 	// Date/time the action was created
 	Timestamp string `json:"@timestamp,omitempty"`
 
-	// The action type. APP_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.
+	// The action type. INPUT_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.
 	Type string `json:"type,omitempty"`
 }
 
diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go
index bdea51fce..a9c726899 100644
--- a/internal/pkg/monitor/monitor.go
+++ b/internal/pkg/monitor/monitor.go
@@ -22,9 +22,12 @@ import (
 )
 
 const (
-	defaultCheckInterval  = 1         // check every second for the new action
-	defaultSeqNo          = int64(-1) // the _seq_no in elasticsearch start with 0
+	defaultCheckInterval  = 1 * time.Second // check every second for the new action
+	defaultSeqNo          = int64(-1)       // the _seq_no in elasticsearch start with 0
 	defaultWithExpiration = false
+	defaultFetchSize      = 10
+
+	tightLoopCheckInterval = 50 * time.Millisecond // when we get a full page (fetchSize) of documents, use this interval to repeatedly poll for more records
 )
 
 const (
@@ -80,6 +83,7 @@ type simpleMonitorT struct {
 	index          string
 	checkInterval  time.Duration
 	withExpiration bool
+	fetchSize      int
 
 	checkpoint int64 // index global checkpoint
 
@@ -98,8 +102,9 @@ func NewSimple(index string, cli *elasticsearch.Client, opts ...Option) (SimpleM
 	m := &simpleMonitorT{
 		index:          index,
 		cli:            cli,
-		checkInterval:  defaultCheckInterval * time.Second,
+		checkInterval:  defaultCheckInterval,
 		withExpiration: defaultWithExpiration,
+		fetchSize:      defaultFetchSize,
 		checkpoint:     defaultSeqNo,
 		outCh:          make(chan []es.HitT, 1),
 	}
@@ -199,29 +204,39 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) {
 	for {
 		select {
 		case <-t.C:
+			interval := m.checkInterval
+
 			hits, err := m.check(ctx)
 			if err != nil {
 				m.log.Error().Err(err).Msg("failed checking new documents")
 			} else {
-				m.notify(ctx, hits)
+				count := m.notify(ctx, hits)
+
+				// Change check interval if fetched the full page (m.fetchSize) of documents
+				if count == m.fetchSize {
+					m.log.Debug().Int("count", count).Dur("wait_next_check", interval).Msg("tight loop check")
+					interval = tightLoopCheckInterval
+				}
 			}
-			t.Reset(m.checkInterval)
+			t.Reset(interval)
 		case <-ctx.Done():
 			return ctx.Err()
 		}
 	}
 }
 
-func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) {
+func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int {
 	sz := len(hits)
 	if sz > 0 {
 		select {
 		case m.outCh <- hits:
 			maxVal := hits[sz-1].SeqNo
m.storeCheckpoint(maxVal) + return sz case <-ctx.Done(): } } + return 0 } func (m *simpleMonitorT) check(ctx context.Context) ([]es.HitT, error) { @@ -322,6 +337,7 @@ func (m *simpleMonitorT) prepareCheckQuery() (tmpl *dsl.Tmpl, err error) { // Prepares full documents query func (m *simpleMonitorT) prepareQuery() (tmpl *dsl.Tmpl, err error) { tmpl, root := m.prepareCommon(true) + root.Size(uint64(m.fetchSize)) root.Sort().SortOrder(fieldSeqNo, dsl.SortAscend) if err := tmpl.Resolve(root); err != nil { diff --git a/model/schema.json b/model/schema.json index caa3254ca..ea0517e40 100644 --- a/model/schema.json +++ b/model/schema.json @@ -30,7 +30,7 @@ "format": "date-time" }, "type": { - "description": "The action type. APP_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.", + "description": "The action type. INPUT_ACTION is the value for the actions that suppose to be routed to the endpoints/beats.", "type": "string" }, "input_type": { From d1fd09269720a1416f6d6b3aada371303baf6878 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 3 Feb 2021 11:02:05 -0500 Subject: [PATCH 011/240] Increase the initial agent's actions fetch size to 100 --- internal/pkg/dl/actions.go | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/internal/pkg/dl/actions.go b/internal/pkg/dl/actions.go index 15b617241..9a2b34fe7 100644 --- a/internal/pkg/dl/actions.go +++ b/internal/pkg/dl/actions.go @@ -15,6 +15,8 @@ import ( const ( FieldAgents = "agents" FieldExpiration = "expiration" + + maxAgentActionsFetchSize = 100 ) var ( @@ -44,6 +46,8 @@ func prepareFindAgentActions() *dsl.Tmpl { filter.Terms(FieldAgents, tmpl.Bind(FieldAgents), nil) + // Select more actions per agent since the agents array is not loaded + root.Size(maxAgentActionsFetchSize) root.Source().Excludes(FieldAgents) tmpl.MustResolve(root) From ae8188982e651fc5a6b40448941823228217f9e4 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Sun, 7 Feb 2021 16:10:50 -0500 Subject: [PATCH 012/240] Flatten .fleet-actions-results schema --- cmd/fleet/handleAck.go | 11 +++++++---- cmd/fleet/schema.go | 25 ++++++++++++++----------- internal/pkg/es/mapping.go | 17 +++++++++++++++++ internal/pkg/model/schema.go | 13 +++++++++++++ model/schema.json | 15 +++++++++++++++ 5 files changed, 66 insertions(+), 15 deletions(-) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index dc8967903..bb85584be 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -108,10 +108,13 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b } acr := model.ActionResult{ - ActionId: ev.ActionId, - AgentId: agent.Id, - Data: ev.Data, - Error: ev.Error, + ActionId: ev.ActionId, + AgentId: agent.Id, + StartedAt: ev.StartedAt, + CompletedAt: ev.CompletedAt, + ActionData: ev.ActionData, + Data: ev.Data, + Error: ev.Error, } if _, err := dl.CreateActionResult(ctx, bulker, acr); err != nil { return err diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index dc2de3b5c..d92d28f00 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -127,15 +127,18 @@ type ActionResp struct { } type Event struct { - Type string `json:"type"` - SubType string `json:"subtype"` - AgentId string `json:"agent_id"` - ActionId string `json:"action_id"` - PolicyId string `json:"policy_id"` - StreamId string `json:"stream_id"` - Timestamp string `json:"timestamp"` - Message string `json:"message"` - Payload string `json:"payload,omitempty"` - Data json.RawMessage `json:"data,omitempty"` - Error string 
`json:"error,omitempty"` + Type string `json:"type"` + SubType string `json:"subtype"` + AgentId string `json:"agent_id"` + ActionId string `json:"action_id"` + PolicyId string `json:"policy_id"` + StreamId string `json:"stream_id"` + Timestamp string `json:"timestamp"` + Message string `json:"message"` + Payload string `json:"payload,omitempty"` + StartedAt string `json:"started_at"` + CompletedAt string `json:"completed_at"` + ActionData json.RawMessage `json:"action_data,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Error string `json:"error,omitempty"` } diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index 91dd14e9a..e2581482b 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -36,15 +36,29 @@ const ( } }` + // ActionData The opaque payload. + MappingActionData = `{ + "properties": { + + } +}` + // ActionResult An Elastic Agent action results MappingActionResult = `{ "properties": { + "action_data": { + "enabled" : false, + "type": "object" + }, "action_id": { "type": "keyword" }, "agent_id": { "type": "keyword" }, + "completed_at": { + "type": "date" + }, "data": { "enabled" : false, "type": "object" @@ -52,6 +66,9 @@ const ( "error": { "type": "keyword" }, + "started_at": { + "type": "date" + }, "@timestamp": { "type": "date" } diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index b4016fd60..60b2920a6 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -55,22 +55,35 @@ type Action struct { Type string `json:"type,omitempty"` } +// ActionData The opaque payload. +type ActionData struct { +} + // ActionResult An Elastic Agent action results type ActionResult struct { ESDocument + // The opaque payload. + ActionData json.RawMessage `json:"action_data,omitempty"` + // The action id. ActionId string `json:"action_id,omitempty"` // The agent id. AgentId string `json:"agent_id,omitempty"` + // Date/time the action was completed + CompletedAt string `json:"completed_at,omitempty"` + // The opaque payload. Data json.RawMessage `json:"data,omitempty"` // The action error message. 
Error string `json:"error,omitempty"` + // Date/time the action was started + StartedAt string `json:"started_at,omitempty"` + // Date/time the action was created Timestamp string `json:"@timestamp,omitempty"` } diff --git a/model/schema.json b/model/schema.json index ea0517e40..7fdccf79a 100644 --- a/model/schema.json +++ b/model/schema.json @@ -73,6 +73,21 @@ "description": "The action id.", "type": "string" }, + "started_at": { + "description": "Date/time the action was started", + "type": "string", + "format": "date-time" + }, + "completed_at": { + "description": "Date/time the action was completed", + "type": "string", + "format": "date-time" + }, + "action_data": { + "description": "The opaque payload.", + "type": "object", + "format": "raw" + }, "error": { "description": "The action error message.", "type": "string" From 5dc43dda3a76d39a67acc2b2ee56acbad32ecffc Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Thu, 18 Feb 2021 11:20:46 -0500 Subject: [PATCH 013/240] [Backport 7.x] Change default bind port from 8000 to 8220 (#92) (#94) --- internal/pkg/config/config_test.go | 14 +++++++------- internal/pkg/config/input.go | 7 +++++-- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 6e9ed874d..de2e6cfe6 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -46,8 +46,8 @@ func TestConfig(t *testing.T) { { Type: "fleet-server", Server: Server{ - Host: "localhost", - Port: 8000, + Host: kDefaultHost, + Port: kDefaultPort, Timeouts: ServerTimeouts{ Read: 5 * time.Second, Write: 60 * 10 * time.Second, @@ -94,8 +94,8 @@ func TestConfig(t *testing.T) { { Type: "fleet-server", Server: Server{ - Host: "localhost", - Port: 8000, + Host: kDefaultHost, + Port: kDefaultPort, Timeouts: ServerTimeouts{ Read: 5 * time.Second, Write: 60 * 10 * time.Second, @@ -140,8 +140,8 @@ func TestConfig(t *testing.T) { { Type: "fleet-server", Server: Server{ - Host: "localhost", - Port: 8000, + Host: kDefaultHost, + Port: kDefaultPort, Timeouts: ServerTimeouts{ Read: 5 * time.Second, Write: 60 * 10 * time.Second, @@ -186,7 +186,7 @@ func TestConfig(t *testing.T) { { Type: "fleet-server", Server: Server{ - Host: "localhost", + Host: kDefaultHost, Port: 8888, Timeouts: ServerTimeouts{ Read: 20 * time.Second, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index d1c84f641..28e7296ca 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -10,6 +10,9 @@ import ( "time" ) +const kDefaultHost = "localhost" +const kDefaultPort = 8220 + // Policy is the configuration policy to use. type Policy struct { ID string `config:"id"` @@ -58,8 +61,8 @@ type Server struct { // InitDefaults initializes the defaults for the configuration. func (c *Server) InitDefaults() { - c.Host = "localhost" - c.Port = 8000 + c.Host = kDefaultHost + c.Port = kDefaultPort c.Timeouts.InitDefaults() c.MaxHeaderByteSize = 8192 // 8k c.RateLimitBurst = 1024 From ce59b47edb0bc6196f0dc10660d1ace37f5133ae Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Thu, 18 Feb 2021 14:10:58 -0500 Subject: [PATCH 014/240] Bump version to 7.13. 
(#97) --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 13bd1d067..5deedb5a6 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ import ( "github.com/elastic/fleet-server/v7/cmd/fleet" ) -const defaultVersion = "7.12.0" +const defaultVersion = "7.13.0" var ( Version string = defaultVersion From f5932f5a94ea2ff49238cfecd331f8b848cf8868 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Mon, 22 Feb 2021 11:19:01 -0500 Subject: [PATCH 015/240] Add ssl configuration to fleet server http. (#98) (#100) * Add ssl configuration to fleet server http configuration. * Add log message when tls disabled. * Fix import. * Fix integration test. (cherry picked from commit 9d451b7168fb6d624bd4797517cb7e8259a9545e) --- cmd/fleet/server.go | 40 ++++++++++++++-------------- cmd/fleet/server_integration_test.go | 2 +- internal/pkg/config/input.go | 20 +++++++------- 3 files changed, 32 insertions(+), 30 deletions(-) diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 0e418adee..3e52dab36 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -6,13 +6,15 @@ package fleet import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/config" + "crypto/tls" slog "log" "net" "net/http" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/rate" + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" ) @@ -37,7 +39,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve Str("bind", addr). Dur("rdTimeout", rdto). Dur("wrTimeout", wrto). - Msg("Server listening") + Msg("server listening") server := http.Server{ Addr: addr, @@ -57,28 +59,32 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve go func() { select { case <-ctx.Done(): - log.Debug().Msg("Force server close on ctx.Done()") + log.Debug().Msg("force server close on ctx.Done()") server.Close() case <-forceCh: - log.Debug().Msg("Go routine forced closed on exit") + log.Debug().Msg("go routine forced closed on exit") } }() - ln, err := makeListener(ctx, addr, cfg) + ln, err := net.Listen("tcp", addr) if err != nil { return err } defer ln.Close() - // TODO: Use tls.Config to properly mux down tls connection - keyFile := cfg.TLS.Key - certFile := cfg.TLS.Cert - - if keyFile != "" || certFile != "" { - return server.ServeTLS(ln, certFile, keyFile) + if cfg.TLS != nil && cfg.TLS.IsEnabled() { + tlsCfg, err := tlscommon.LoadTLSConfig(cfg.TLS) + if err != nil { + return err + } + server.TLSConfig = tlsCfg.ToConfig() + ln = tls.NewListener(ln, server.TLSConfig) + } else { + log.Warn().Msg("exposed over insecure HTTP; enablement of TLS is strongly recommended") } + ln = wrapRateLimitter(ctx, ln, cfg) if err := server.Serve(ln); err != nil && err != context.Canceled { return err } @@ -86,13 +92,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve return nil } -func makeListener(ctx context.Context, addr string, cfg *config.Server) (net.Listener, error) { - // Create listener - ln, err := net.Listen("tcp", addr) - if err != nil { - return nil, err - } - +func wrapRateLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) net.Listener { rateLimitBurst := cfg.RateLimitBurst rateLimitInterval := cfg.RateLimitInterval @@ -100,10 +100,10 @@ func makeListener(ctx context.Context, addr string, cfg *config.Server) (net.Lis log.Info().Dur("interval", 
rateLimitInterval).Int("burst", rateLimitBurst).Msg("Server rate limiter installed") ln = rate.NewRateListener(ctx, ln, rateLimitBurst, rateLimitInterval) } else { - log.Info().Msg("Server connection rate limiter disabled") + log.Info().Msg("server connection rate limiter disabled") } - return ln, err + return ln } type stubLogger struct { diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 61cbd3bf4..5f9d726c9 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -47,7 +47,7 @@ func (s *tserver) baseUrl() string { input := s.cfg.Inputs[0] tls := input.Server.TLS schema := "http" - if tls.Key != "" || tls.Cert != "" { + if tls != nil && tls.IsEnabled() { schema = "https" } return fmt.Sprintf("%s://%s:%d", schema, input.Server.Host, input.Server.Port) diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 28e7296ca..391bfc48c 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -8,6 +8,8 @@ import ( "fmt" "strings" "time" + + "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) const kDefaultHost = "localhost" @@ -48,15 +50,15 @@ type ServerTLS struct { // Server is the configuration for the server type Server struct { - Host string `config:"host"` - Port uint16 `config:"port"` - TLS ServerTLS `config:"tls"` - Timeouts ServerTimeouts `config:"timeouts"` - MaxHeaderByteSize int `config:"max_header_byte_size"` - RateLimitBurst int `config:"rate_limit_burst"` - RateLimitInterval time.Duration `config:"rate_limit_interval"` - MaxEnrollPending int64 `config:"max_enroll_pending"` - Profile ServerProfile `config:"profile"` + Host string `config:"host"` + Port uint16 `config:"port"` + TLS *tlscommon.Config `config:"ssl"` + Timeouts ServerTimeouts `config:"timeouts"` + MaxHeaderByteSize int `config:"max_header_byte_size"` + RateLimitBurst int `config:"rate_limit_burst"` + RateLimitInterval time.Duration `config:"rate_limit_interval"` + MaxEnrollPending int64 `config:"max_enroll_pending"` + Profile ServerProfile `config:"profile"` } // InitDefaults initializes the defaults for the configuration. 
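
The patch above moves the server from cert/key file handling to a tlscommon-driven TLS config on the listener, falling back to plain HTTP (with a warning) when no `ssl` section is configured. As a rough, self-contained illustration of the same listener-wrapping idea using only the Go standard library: this is a simplified sketch, not the tlscommon.LoadTLSConfig path the patch actually uses, and the certificate/key file names below are hypothetical.

package main

import (
	"crypto/tls"
	"log"
	"net"
	"net/http"
)

func main() {
	// Plain TCP listener first, mirroring the net.Listen call in runServer.
	// 8220 is the default fleet-server port introduced in patch 013.
	ln, err := net.Listen("tcp", "localhost:8220")
	if err != nil {
		log.Fatal(err)
	}

	// Hypothetical certificate/key paths; the real server builds its
	// tls.Config from the `ssl` config section via tlscommon.
	cert, err := tls.LoadX509KeyPair("fleet-server.crt", "fleet-server.key")
	if err != nil {
		log.Fatal(err)
	}

	// Wrap the listener so the same http.Server instance serves TLS traffic.
	ln = tls.NewListener(ln, &tls.Config{Certificates: []tls.Certificate{cert}})

	srv := &http.Server{Handler: http.DefaultServeMux}
	log.Fatal(srv.Serve(ln))
}

If the key pair were omitted, the listener would stay unwrapped and the server would serve plain HTTP, which is the insecure fallback the patch logs a warning about.
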
From 900202936836b6ff725567809ff7fcf0b22f5f63 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Wed, 24 Feb 2021 14:06:17 -0500 Subject: [PATCH 016/240] Upgrade to Go 1.15 --- .go-version | 2 +- go.mod | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 4ed70fac1..98e863cdf 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.14.12 +1.15.8 diff --git a/go.mod b/go.mod index dcf5ad298..13460aef5 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/fleet-server/v7 -go 1.14 +go 1.15 require ( github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f From f0c932026a394d074eefe6f36d3c5048f3914345 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Thu, 25 Feb 2021 10:43:14 -0500 Subject: [PATCH 017/240] Align output key permissions with 7.12 Kibana (#103) (#107) --- cmd/fleet/schema.go | 37 +++++++++++++++++-------------------- 1 file changed, 17 insertions(+), 20 deletions(-) diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index d92d28f00..9718a0748 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -36,26 +36,23 @@ const kFleetAccessRolesJSON = ` ` const kFleetOutputRolesJSON = ` -{ - "fleet-output": { - "cluster": ["monitor"], - "index": [{ - "names": [ - "logs-*", - "metrics-*", - "events-*", - ".ds-logs-*", - ".ds-metrics-*", - ".ds-events-*" - ], - "privileges": [ - "write", - "create_index", - "indices:admin/auto_create" - ] - }] - } -} + { + "fleet-output": { + "cluster": ["monitor"], + "index": [{ + "names": [ + "logs-*", + "metrics-*", + "traces-*", + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "auto_configure", + "create_doc" + ] + }] + } + } ` // Wrong: no AAD; From 0ff4c9078be59ad5fd37819eb2addbabe5e2eb66 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Mon, 1 Mar 2021 10:29:51 -0500 Subject: [PATCH 018/240] Remove ES bootsrap code from Fleet Server (#105) (#109) * Moved it to testing/esutil package because it is still used for integration testing indices bootstrapping --- cmd/fleet/main.go | 25 +++---------------- dev-tools/integration/main.go | 8 +++--- .../pkg/{esboot => testing/esutil}/README.MD | 0 .../{esboot => testing/esutil}/bootstrap.go | 2 +- .../{esboot => testing/esutil}/datastream.go | 2 +- .../pkg/{esboot => testing/esutil}/esutil.go | 2 +- .../pkg/{esboot => testing/esutil}/ilm.go | 2 +- .../pkg/{esboot => testing/esutil}/index.go | 2 +- .../pkg/{esboot => testing/esutil}/strmap.go | 2 +- .../{esboot => testing/esutil}/template.go | 2 +- internal/pkg/testing/setup.go | 4 +-- 11 files changed, 16 insertions(+), 35 deletions(-) rename internal/pkg/{esboot => testing/esutil}/README.MD (100%) rename internal/pkg/{esboot => testing/esutil}/bootstrap.go (99%) rename internal/pkg/{esboot => testing/esutil}/datastream.go (98%) rename internal/pkg/{esboot => testing/esutil}/esutil.go (99%) rename internal/pkg/{esboot => testing/esutil}/ilm.go (99%) rename internal/pkg/{esboot => testing/esutil}/index.go (98%) rename internal/pkg/{esboot => testing/esutil}/strmap.go (97%) rename internal/pkg/{esboot => testing/esutil}/template.go (99%) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 730ab9f9e..68d918c3e 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -7,13 +7,14 @@ package fleet import ( "context" "fmt" - "github.com/elastic/go-ucfg" - "github.com/elastic/go-ucfg/yaml" "io" "os" "sync" "time" + "github.com/elastic/go-ucfg" + "github.com/elastic/go-ucfg/yaml" + 
"github.com/elastic/fleet-server/v7/internal/pkg/action" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" @@ -21,9 +22,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/coordinator" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/env" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" "github.com/elastic/fleet-server/v7/internal/pkg/logger" - "github.com/elastic/fleet-server/v7/internal/pkg/migrate" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/profile" @@ -472,24 +471,6 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er } sv := saved.NewMgr(bulker, savedObjectKey()) - // Initial indices bootstrapping, needed for agents actions development - // TODO: remove this after the indices bootstrapping logic implemented in ES plugin - bootFlag := env.GetStr( - "FLEET_ES_BOOT", - "", - ) - if bootFlag == "1" { - log.Debug().Msg("FLEET_ES_BOOT is set to true, perform bootstrap") - err = esboot.EnsureESIndices(ctx, es) - if err != nil { - return err - } - err = migrate.Migrate(ctx, log.Logger, sv, bulker) - if err != nil { - return err - } - } - // Replacing to errgroup context g, ctx := errgroup.WithContext(ctx) diff --git a/dev-tools/integration/main.go b/dev-tools/integration/main.go index 3484529ad..a4e329319 100644 --- a/dev-tools/integration/main.go +++ b/dev-tools/integration/main.go @@ -11,7 +11,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" "github.com/rs/zerolog/log" ) @@ -34,15 +34,15 @@ func main() { es, err := es.NewClient(ctx, cfg) checkErr(err) - err = esboot.EnsureESIndices(ctx, es) + err = esutil.EnsureESIndices(ctx, es) checkErr(err) // Create .kibana index for integration tests // This temporarily until all the parts are unplugged from .kibana // Otherwise the fleet server fails to start at the moment const name = ".kibana" - err = esboot.EnsureIndex(ctx, es, name, kibanaMapping) - if errors.Is(err, esboot.ErrResourceAlreadyExists) { + err = esutil.EnsureIndex(ctx, es, name, kibanaMapping) + if errors.Is(err, esutil.ErrResourceAlreadyExists) { log.Info().Str("name", name).Msg("Index already exists") err = nil } diff --git a/internal/pkg/esboot/README.MD b/internal/pkg/testing/esutil/README.MD similarity index 100% rename from internal/pkg/esboot/README.MD rename to internal/pkg/testing/esutil/README.MD diff --git a/internal/pkg/esboot/bootstrap.go b/internal/pkg/testing/esutil/bootstrap.go similarity index 99% rename from internal/pkg/esboot/bootstrap.go rename to internal/pkg/testing/esutil/bootstrap.go index 9906a8f52..535f201d7 100644 --- a/internal/pkg/esboot/bootstrap.go +++ b/internal/pkg/testing/esutil/bootstrap.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package esboot +package esutil import ( "context" diff --git a/internal/pkg/esboot/datastream.go b/internal/pkg/testing/esutil/datastream.go similarity index 98% rename from internal/pkg/esboot/datastream.go rename to internal/pkg/testing/esutil/datastream.go index da518aaab..d31a37b78 100644 --- a/internal/pkg/esboot/datastream.go +++ b/internal/pkg/testing/esutil/datastream.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "context" diff --git a/internal/pkg/esboot/esutil.go b/internal/pkg/testing/esutil/esutil.go similarity index 99% rename from internal/pkg/esboot/esutil.go rename to internal/pkg/testing/esutil/esutil.go index a6c251e7d..1bace262b 100644 --- a/internal/pkg/esboot/esutil.go +++ b/internal/pkg/testing/esutil/esutil.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "encoding/json" diff --git a/internal/pkg/esboot/ilm.go b/internal/pkg/testing/esutil/ilm.go similarity index 99% rename from internal/pkg/esboot/ilm.go rename to internal/pkg/testing/esutil/ilm.go index 34256c3e5..16dc28271 100644 --- a/internal/pkg/esboot/ilm.go +++ b/internal/pkg/testing/esutil/ilm.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "context" diff --git a/internal/pkg/esboot/index.go b/internal/pkg/testing/esutil/index.go similarity index 98% rename from internal/pkg/esboot/index.go rename to internal/pkg/testing/esutil/index.go index a6ebcd877..6b61534af 100644 --- a/internal/pkg/esboot/index.go +++ b/internal/pkg/testing/esutil/index.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil import ( "context" diff --git a/internal/pkg/esboot/strmap.go b/internal/pkg/testing/esutil/strmap.go similarity index 97% rename from internal/pkg/esboot/strmap.go rename to internal/pkg/testing/esutil/strmap.go index 9794655aa..3e0950ff1 100644 --- a/internal/pkg/esboot/strmap.go +++ b/internal/pkg/testing/esutil/strmap.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. -package esboot +package esutil type stringMap map[string]interface{} diff --git a/internal/pkg/esboot/template.go b/internal/pkg/testing/esutil/template.go similarity index 99% rename from internal/pkg/esboot/template.go rename to internal/pkg/testing/esutil/template.go index bbea76279..0873c4885 100644 --- a/internal/pkg/esboot/template.go +++ b/internal/pkg/testing/esutil/template.go @@ -2,7 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
-package esboot +package esutil import ( "context" diff --git a/internal/pkg/testing/setup.go b/internal/pkg/testing/setup.go index 84a30b5c7..03acdc368 100644 --- a/internal/pkg/testing/setup.go +++ b/internal/pkg/testing/setup.go @@ -15,7 +15,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/esboot" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" ) var defaultCfg config.Config @@ -53,7 +53,7 @@ func SetupBulk(ctx context.Context, t *testing.T, opts ...bulk.BulkOpt) bulk.Bul func SetupIndex(ctx context.Context, t *testing.T, bulker bulk.Bulk, mapping string) string { t.Helper() index := xid.New().String() - err := esboot.EnsureIndex(ctx, bulker.Client(), index, mapping) + err := esutil.EnsureIndex(ctx, bulker.Client(), index, mapping) if err != nil { t.Fatal(err) } From 8c435f5e8e24c7fb5b9dfd6b67e56aa59ff30f07 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Mon, 1 Mar 2021 15:26:12 -0500 Subject: [PATCH 019/240] Remove saved objects code (#110) (#111) * Remove saved objects code * Make check happy --- NOTICE.txt | 136 ++++++------- cmd/fleet/bulkCheckin.go | 15 +- cmd/fleet/dsl.go | 36 ---- cmd/fleet/handleCheckin.go | 7 +- cmd/fleet/main.go | 6 +- go.mod | 2 - go.sum | 2 - internal/pkg/migrate/migrate.go | 121 ------------ internal/pkg/saved/crud.go | 338 -------------------------------- internal/pkg/saved/crypto.go | 180 ----------------- internal/pkg/saved/encode.go | 232 ---------------------- internal/pkg/saved/errors.go | 23 --- internal/pkg/saved/fields.go | 53 ----- internal/pkg/saved/id.go | 63 ------ internal/pkg/saved/nonce.go | 39 ---- internal/pkg/saved/opts.go | 84 -------- internal/pkg/saved/query.go | 237 ---------------------- 17 files changed, 80 insertions(+), 1494 deletions(-) delete mode 100644 cmd/fleet/dsl.go delete mode 100644 internal/pkg/migrate/migrate.go delete mode 100644 internal/pkg/saved/crud.go delete mode 100644 internal/pkg/saved/crypto.go delete mode 100644 internal/pkg/saved/encode.go delete mode 100644 internal/pkg/saved/errors.go delete mode 100644 internal/pkg/saved/fields.go delete mode 100644 internal/pkg/saved/id.go delete mode 100644 internal/pkg/saved/nonce.go delete mode 100644 internal/pkg/saved/opts.go delete mode 100644 internal/pkg/saved/query.go diff --git a/NOTICE.txt b/NOTICE.txt index af04cc594..b7f8e4f8a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1755,37 +1755,6 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
--------------------------------------------------------------------------------- -Dependency : github.com/mitchellh/mapstructure -Version: v1.3.3 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.3.3/LICENSE: - -The MIT License (MIT) - -Copyright (c) 2013 Mitchell Hashimoto - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/rs/xid Version: v1.2.1 @@ -2061,43 +2030,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/crypto -Version: v0.0.0-20200622213623-75b288015ac9 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20200622213623-75b288015ac9/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/sync Version: v0.0.0-20200625203802-6e8e738ad208 @@ -27443,6 +27375,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/mitchellh/mapstructure +Version: v1.1.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mitchellh/mapstructure@v1.1.2/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Mitchell Hashimoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/modern-go/concurrent Version: v0.0.0-20180306012644-bacd9c7ef1dd @@ -35190,6 +35153,43 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/crypto +Version: v0.0.0-20200622213623-75b288015ac9 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/crypto@v0.0.0-20200622213623-75b288015ac9/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. 
nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/exp Version: v0.0.0-20191227195350-da58074b4299 diff --git a/cmd/fleet/bulkCheckin.go b/cmd/fleet/bulkCheckin.go index 17496da15..bb508c917 100644 --- a/cmd/fleet/bulkCheckin.go +++ b/cmd/fleet/bulkCheckin.go @@ -12,15 +12,16 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" "github.com/rs/zerolog/log" ) +type Fields map[string]interface{} + const kBulkCheckinFlushInterval = 10 * time.Second type PendingData struct { - fields saved.Fields + fields Fields seqNo int64 } @@ -37,10 +38,10 @@ func NewBulkCheckin(bulker bulk.Bulk) *BulkCheckin { } } -func (bc *BulkCheckin) CheckIn(id string, fields saved.Fields, seqno int64) error { +func (bc *BulkCheckin) CheckIn(id string, fields Fields, seqno int64) error { if fields == nil { - fields = make(saved.Fields) + fields = make(Fields) } timeNow := time.Now().UTC().Format(time.RFC3339) @@ -52,7 +53,7 @@ func (bc *BulkCheckin) CheckIn(id string, fields saved.Fields, seqno int64) erro return nil } -func (bc *BulkCheckin) Run(ctx context.Context, sv saved.CRUD) error { +func (bc *BulkCheckin) Run(ctx context.Context) error { tick := time.NewTicker(kBulkCheckinFlushInterval) @@ -61,7 +62,7 @@ LOOP: for { select { case <-tick.C: - if err = bc.flush(ctx, sv); err != nil { + if err = bc.flush(ctx); err != nil { log.Error().Err(err).Msg("Eat bulk checkin error; Keep on truckin'") err = nil } @@ -75,7 +76,7 @@ LOOP: return err } -func (bc *BulkCheckin) flush(ctx context.Context, sv saved.CRUD) error { +func (bc *BulkCheckin) flush(ctx context.Context) error { start := time.Now() bc.mut.Lock() diff --git a/cmd/fleet/dsl.go b/cmd/fleet/dsl.go deleted file mode 100644 index b11c215b8..000000000 --- a/cmd/fleet/dsl.go +++ /dev/null @@ -1,36 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package fleet - -import ( - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" -) - -const ( - kTmplApiKeyField = "ApiKeyId" - kTmplAgentIdField = "AgentIdList" -) - -var agentActionQueryTmpl = genAgentActionQueryTemplate() - -func genAgentActionQueryTemplate() *dsl.Tmpl { - tmpl := dsl.NewTmpl() - token := tmpl.Bind(kTmplAgentIdField) - - root := saved.NewQuery(AGENT_ACTION_SAVED_OBJECT_TYPE) - - fieldSentAt := saved.ScopeField(AGENT_ACTION_SAVED_OBJECT_TYPE, "sent_at") - fieldAgentId := saved.ScopeField(AGENT_ACTION_SAVED_OBJECT_TYPE, "agent_id") - - root.Query().Bool().Must().Terms(fieldAgentId, token, nil) - root.Query().Bool().MustNot().Exists(fieldSentAt) - - if err := tmpl.Resolve(root); err != nil { - panic(err) - } - - return tmpl -} diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index f074eedac..7f57d90db 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -21,7 +21,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" @@ -347,7 +346,7 @@ func findAgentByApiKeyId(ctx context.Context, bulker bulk.Bulk, id string) (*mod // parseMeta compares the agent and the request local_metadata content // and returns fields to update the agent record or nil -func parseMeta(agent *model.Agent, req *CheckinRequest) (fields saved.Fields, err error) { +func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err error) { // Quick comparison first if bytes.Equal(req.LocalMeta, agent.LocalMetadata) { log.Trace().Msg("Quick comparing local metadata is equal") @@ -355,8 +354,8 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) (fields saved.Fields, er } // Compare local_metadata content and update if different - var reqLocalMeta saved.Fields - var agentLocalMeta saved.Fields + var reqLocalMeta Fields + var agentLocalMeta Fields err = json.Unmarshal(req.LocalMeta, &reqLocalMeta) if err != nil { return nil, err diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 68d918c3e..15fe89984 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -27,7 +27,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/profile" "github.com/elastic/fleet-server/v7/internal/pkg/reload" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" "github.com/elastic/fleet-server/v7/internal/pkg/signal" "github.com/elastic/fleet-server/v7/internal/pkg/status" @@ -469,7 +468,6 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er if err != nil { return err } - sv := saved.NewMgr(bulker, savedObjectKey()) // Replacing to errgroup context g, ctx := errgroup.WithContext(ctx) @@ -512,9 +510,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er } bc := NewBulkCheckin(bulker) - g.Go(loggedRunFunc(ctx, "Bulk checkin", func(ctx context.Context) error { - return bc.Run(ctx, sv) - })) + g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) ct := NewCheckinT(f.cfg, f.cache, bc, pm, am, ad, tr, bulker) et, err := NewEnrollerT(&f.cfg.Inputs[0].Server, bulker, f.cache) diff --git a/go.mod b/go.mod index 13460aef5..a7cb815b1 100644 --- a/go.mod +++ b/go.mod @@ -14,12 +14,10 @@ require ( 
github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 - github.com/mitchellh/mapstructure v1.3.3 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.19.0 github.com/spf13/cobra v0.0.5 github.com/stretchr/testify v1.6.1 - golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ) diff --git a/go.sum b/go.sum index 15f04d599..19430dbad 100644 --- a/go.sum +++ b/go.sum @@ -360,8 +360,6 @@ github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18 github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3 h1:SzB1nHZ2Xi+17FP0zVQBHIZqvwRN9408fJO8h+eeNA8= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= diff --git a/internal/pkg/migrate/migrate.go b/internal/pkg/migrate/migrate.go deleted file mode 100644 index f18a4bde0..000000000 --- a/internal/pkg/migrate/migrate.go +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package migrate - -import ( - "context" - "encoding/json" - "errors" - - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/fleet-server/v7/internal/pkg/model" - "github.com/elastic/fleet-server/v7/internal/pkg/saved" - "github.com/rs/zerolog" -) - -type enrollmentApiKey struct { - Name string `json:"name"` - Type string `json:"type"` - ApiKey string `json:"api_key" saved:"encrypt"` - ApiKeyId string `json:"api_key_id"` - PolicyId string `json:"policy_id"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - ExpireAt string `json:"expire_at"` - Active bool `json:"active"` -} - -// Data migration -// This is for development only (1 instance of fleet) -// Not safe for multiple instances of fleet -// Initially needed to migrate the enrollment-api-keys that kibana creates -func Migrate(ctx context.Context, log zerolog.Logger, sv saved.CRUD, bulker bulk.Bulk) error { - return MigrateEnrollmentAPIKeys(ctx, log, sv, bulker) -} - -func MigrateEnrollmentAPIKeys(ctx context.Context, log zerolog.Logger, sv saved.CRUD, bulker bulk.Bulk) error { - - // Query all enrollment keys from the new schema - raw, err := dl.RenderAllEnrollmentAPIKeysQuery(1000) - if err != nil { - return err - } - - var recs []model.EnrollmentApiKey - var resHits []es.HitT - res, err := bulker.Search(ctx, []string{dl.FleetEnrollmentAPIKeys}, raw, bulk.WithRefresh()) - if err != nil { - if errors.Is(err, es.ErrIndexNotFound) { - log.Debug().Str("index", dl.FleetEnrollmentAPIKeys).Msg(es.ErrIndexNotFound.Error()) - // Continue with migration if the .fleet-enrollment-api-keys index is not found - err = nil - } else { - return err - } - } else { - resHits = res.Hits - } - - for _, hit := range resHits { - var rec model.EnrollmentApiKey - err := json.Unmarshal(hit.Source, &rec) - if err != nil { - return err - } - recs = append(recs, rec) - } - - // Query enrollment keys from kibana saved objects - query := saved.NewQuery("fleet-enrollment-api-keys") - - hits, err := sv.FindByNode(ctx, query) - if err != nil { - return err - } - - for _, hit := range hits { - var rec enrollmentApiKey - if err := sv.Decode(hit, &rec); err != nil { - return err - } - if _, ok := findExistingEnrollmentAPIKey(recs, rec); !ok { - newRec := translateEnrollmentAPIKey(rec) - b, err := json.Marshal(newRec) - if err != nil { - return err - } - _, err = bulker.Create(ctx, dl.FleetEnrollmentAPIKeys, "", b, bulk.WithRefresh()) - if err != nil { - return err - } - } - } - - return nil -} - -func findExistingEnrollmentAPIKey(hay []model.EnrollmentApiKey, needle enrollmentApiKey) (*model.EnrollmentApiKey, bool) { - for _, rec := range hay { - if rec.ApiKeyId == needle.ApiKeyId { - return &rec, true - } - } - return nil, false -} - -func translateEnrollmentAPIKey(src enrollmentApiKey) model.EnrollmentApiKey { - return model.EnrollmentApiKey{ - Active: src.Active, - ApiKey: src.ApiKey, - ApiKeyId: src.ApiKeyId, - CreatedAt: src.CreatedAt, - ExpireAt: src.ExpireAt, - Name: src.Name, - PolicyId: src.PolicyId, - UpdatedAt: src.UpdatedAt, - } -} diff --git a/internal/pkg/saved/crud.go b/internal/pkg/saved/crud.go deleted file mode 100644 index a3fd2aabf..000000000 --- a/internal/pkg/saved/crud.go +++ /dev/null @@ -1,338 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. 
Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "context" - "encoding/json" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" - - "github.com/elastic/go-elasticsearch/v8" - "github.com/rs/zerolog/log" -) - -const ( - kIndexKibana = ".kibana*" - kMigrationVersion = "7.9.0" // TODO: bring in during build -) - -type Hit struct { - Id string - Type string - Space string - References []string - UpdatedAt string - Data json.RawMessage -} - -type UpdateT struct { - Id string - Type string - Fields map[string]interface{} -} - -type CRUD interface { - Create(ctx context.Context, ty string, src interface{}, opts ...Option) (id string, err error) - Read(ctx context.Context, ty, id string, dst interface{}, opts ...Option) error - - // AAD or Encrypted fields not supported; you will break your saved object; don't do that. - Update(ctx context.Context, ty, id string, fields map[string]interface{}, opts ...Option) error - MUpdate(ctx context.Context, updates []UpdateT, opts ...Option) error - - FindByField(ctx context.Context, ty string, fields map[string]interface{}) ([]Hit, error) - FindByNode(ctx context.Context, node *dsl.Node) ([]Hit, error) - FindRaw(ctx context.Context, json []byte) ([]Hit, error) - Decode(hit Hit, dst interface{}) error - - Client() *elasticsearch.Client -} - -type mgr struct { - idx bulk.Bulk - key string -} - -func NewMgr(idx bulk.Bulk, key string) CRUD { - return &mgr{idx, key} -} - -func (m *mgr) Client() *elasticsearch.Client { - return m.idx.Client() -} - -func (m *mgr) Create(ctx context.Context, ty string, src interface{}, options ...Option) (id string, err error) { - opts, err := processOpts(options...) - - if err != nil { - return - } - - if err = validateType(ty); err != nil { - return - } - - if id, err = genID(opts); err != nil { - return - } - - var data []byte - if data, err = m.encode(ty, id, opts.Space, src); err != nil { - return - } - - docID := fmtID(ty, id, opts.Space) - - nowStr := time.Now().UTC().Format(time.RFC3339) - - // TODO: hardcoded migration version - var objMap = map[string]interface{}{ - ty: json.RawMessage(data), - "type": ty, - "updated_at": nowStr, - "migrationVersion": map[string]string{ - "config": kMigrationVersion, - }, - "references": opts.References, - } - - if opts.Space != "" { - objMap["namespace"] = opts.Space - } - - var source []byte - if source, err = json.Marshal(objMap); err != nil { - return - } - - bulkOpts := m.makeBulkOpts(opts) - - if opts.Overwrite { - id, err = m.idx.Index(ctx, kIndexKibana, docID, source, bulkOpts...) - } else { - id, err = m.idx.Create(ctx, kIndexKibana, docID, source, bulkOpts...) - } - - log.Trace().Err(err).RawJSON("source", source).Msg("On create") - - return -} - -func (m *mgr) makeBulkOpts(opts optionsT) []bulk.Opt { - var bulkOpts []bulk.Opt - if opts.Refresh { - bulkOpts = append(bulkOpts, bulk.WithRefresh()) - } - return bulkOpts -} - -func (m *mgr) Read(ctx context.Context, ty, id string, dst interface{}, options ...Option) error { - opts, err := processOpts(options...) 
- if err != nil { - return err - } - - if err := validateType(ty); err != nil { - return err - } - - if err := validateId(id); err != nil { - return err - } - - docId := fmtID(ty, id, opts.Space) - - payload, err := m.idx.Read(ctx, kIndexKibana, docId, bulk.WithRefresh()) - if err != nil { - return err - } - - var tmap map[string]json.RawMessage - if err = json.Unmarshal(payload, &tmap); err != nil { - return err - } - - obj, ok := tmap[ty] - if !ok { - return ErrMalformedSavedObj - } - - return m.decode(ty, id, opts.Space, obj, dst) -} - -// Warning: If you pass encrypted or AAD fields, you broke something. Don't do that. -func (m *mgr) Update(ctx context.Context, ty, id string, fields map[string]interface{}, options ...Option) error { - opts, err := processOpts(options...) - if err != nil { - return err - } - - if err := validateType(ty); err != nil { - return err - } - - if err := validateId(id); err != nil { - return err - } - - docId := fmtID(ty, id, opts.Space) - - timeNow := time.Now().UTC().Format(time.RFC3339) - - source, err := json.Marshal(map[string]interface{}{ - "doc": map[string]interface{}{ - ty: fields, - "updated_at": timeNow, - }, - }) - - if err != nil { - return err - } - - bulkOpts := m.makeBulkOpts(opts) - - return m.idx.Update(ctx, kIndexKibana, docId, source, bulkOpts...) -} - -// Warning: If you pass encrypted or AAD fields, you broke something. Don't do that. -func (m *mgr) MUpdate(ctx context.Context, updates []UpdateT, options ...Option) error { - opts, err := processOpts(options...) - if err != nil { - return err - } - - timeNow := time.Now().UTC().Format(time.RFC3339) - - ops := make([]bulk.BulkOp, 0, len(updates)) - - for _, u := range updates { - - if err := validateType(u.Type); err != nil { - return err - } - - if err := validateId(u.Id); err != nil { - return err - } - - docId := fmtID(u.Type, u.Id, opts.Space) - - source, err := json.Marshal(map[string]interface{}{ - "doc": map[string]interface{}{ - u.Type: u.Fields, - "updated_at": timeNow, - }, - }) - - if err != nil { - return err - } - - ops = append(ops, bulk.BulkOp{ - Id: docId, - Body: source, - Index: kIndexKibana, - }) - } - - bulkOpts := m.makeBulkOpts(opts) - - return m.idx.MUpdate(ctx, ops, bulkOpts...) -} - -// Simple term query; does NOT support find on encrypted field. -func (m *mgr) FindByField(ctx context.Context, ty string, fields map[string]interface{}) ([]Hit, error) { - - query := NewQuery(ty) - mustNode := query.Query().Bool().Must() - for f, v := range fields { - mustNode.Term(ScopeField(ty, f), v, nil) - } - - return m.FindByNode(ctx, query) -} - -func (m *mgr) FindByNode(ctx context.Context, node *dsl.Node) ([]Hit, error) { - body, err := json.Marshal(node) - if err != nil { - return nil, err - } - - return m.FindRaw(ctx, body) -} - -func (m *mgr) FindRaw(ctx context.Context, body []byte) ([]Hit, error) { - - searcResult, err := m.idx.Search(ctx, []string{kIndexKibana}, body) - - if err != nil { - return nil, err - } - - var hits []Hit - - for _, h := range searcResult.Hits { - - o, err := parseId(h.Id) - if err != nil { - return nil, err - } - - // Decode the source, better way to do this? 
- var src map[string]json.RawMessage - if err := json.Unmarshal(h.Source, &src); err != nil { - return nil, err - } - - var t string - if err := json.Unmarshal(src["type"], &t); err != nil { - return nil, err - } - - var space string - if v, ok := src["namespace"]; ok { - if err := json.Unmarshal(v, &space); err != nil { - return nil, err - } - } - - if t != o.ty { - return nil, ErrTypeMismatch - } - - if space != o.ns { - return nil, ErrSpaceMismatch - } - - var refs []string - if err := json.Unmarshal(src["references"], &refs); err != nil { - return nil, err - } - - var updatedAt string - if err := json.Unmarshal(src["updated_at"], &updatedAt); err != nil { - return nil, err - } - - hits = append(hits, Hit{ - Id: o.id, - Type: t, - Space: space, - References: refs, - UpdatedAt: updatedAt, - Data: src[t], - }) - - } - - return hits, err -} - -func (m *mgr) Decode(hit Hit, dst interface{}) error { - return m.decode(hit.Type, hit.Id, hit.Space, hit.Data, dst) -} diff --git a/internal/pkg/saved/crypto.go b/internal/pkg/saved/crypto.go deleted file mode 100644 index 948db9ead..000000000 --- a/internal/pkg/saved/crypto.go +++ /dev/null @@ -1,180 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "bytes" - "crypto/aes" - "crypto/cipher" - "crypto/sha512" - "encoding/base64" - "encoding/json" - "golang.org/x/crypto/pbkdf2" -) - -const ( - tagLen = 16 - keyLengthInBytes = 32 - keyIterations = 10000 -) - -func encryptFields(key, aad []byte, fields Fields) error { - - for k, v := range fields { - ciphertext, err := encrypt(key, aad, v) - - if err != nil { - return err - } - fields[k] = ciphertext - } - - return nil -} - -func decryptFields(key, aad []byte, fields Fields) error { - - for k, v := range fields { - ciphertext, ok := v.(string) - if !ok { - return ErrBadCipherText - } - - v, err := decrypt(key, aad, ciphertext) - - if err != nil { - return err - } - fields[k] = v - } - - return nil -} - -// see: https://github.com/elastic/node-crypto/blob/master/src/crypto.ts#L119 -func encrypt(key, aad []byte, v interface{}) (string, error) { - - plaintext, err := json.Marshal(v) - if err != nil { - return "", err - } - - // Generate random data for iv and salt - nonce, err := newNonce() - if err != nil { - return "", err - } - - dk := deriveKey(key, nonce.salt()) - - block, err := aes.NewCipher(dk) - if err != nil { - return "", err - } - - aesgcm, err := cipher.NewGCMWithTagSize(block, tagLen) - if err != nil { - return "", err - } - - ciphertext := aesgcm.Seal(nil, nonce.iv(), plaintext, aad) - - // Expects binary buffer [salt, iv, tag, encrypted] - // goland slaps the tag on the back of the slice, so we have to reorg a bit - tagOffset := len(ciphertext) - tagLen - - buf := bytes.Buffer{} - buf.Grow(ivLen + saltLen + len(ciphertext)) - // Write salt:iv - buf.Write(nonce.both()) - // Write tag - buf.Write(ciphertext[tagOffset:]) - // Write cipher text - buf.Write(ciphertext[:tagOffset]) - - payload := base64.StdEncoding.EncodeToString(buf.Bytes()) - return payload, nil -} - -func decrypt(key, aad []byte, cipherText string) (interface{}, error) { - - ciphertext, err := base64.StdEncoding.DecodeString(cipherText) - if err != nil { - return nil, err - } - - // expects header [salt, iv, tag, encrypted] - if len(ciphertext) <= saltLen+ivLen+tagLen { - return nil, ErrBadCipherText 
- } - - tagOffset := saltLen + ivLen - dataOffset := tagOffset + tagLen - - salt := ciphertext[:saltLen] - iv := ciphertext[saltLen:tagOffset] - tag := ciphertext[tagOffset:dataOffset] - data := ciphertext[dataOffset:] - - dk := deriveKey(key, salt) - - block, err := aes.NewCipher(dk) - if err != nil { - return nil, err - } - - aesgcm, err := cipher.NewGCMWithTagSize(block, tagLen) - if err != nil { - return nil, err - } - - // aesgcm expects the tag to be after the ciphertext - buf := bytes.Buffer{} - buf.Grow(len(data) + len(tag)) - buf.Write(data) - buf.Write(tag) - - plaintext, err := aesgcm.Open(nil, iv, buf.Bytes(), aad) - if err != nil { - return nil, err - } - - // plaintext is raw JSON, decode - var v interface{} - err = json.Unmarshal(plaintext, &v) - return v, err -} - -func deriveKey(key, salt []byte) []byte { - - return pbkdf2.Key( - []byte(key), - salt, - keyIterations, - keyLengthInBytes, - sha512.New, - ) -} - -// Emulate Additional Authenticated Data (AAD) generation in Kibana -// Effectively stable_stringify([ {namespace}, type, id, attributesAAD]); -// -func deriveAAD(ty, id, space string, attrs map[string]interface{}) ([]byte, error) { - /* - if len(attrs) == 0 { - log.Debug().Str("type", ty).Str("id", id).Str("space", space).Msg("No AAD; that seems wrong.") - } - */ - - v := []interface{}{space, ty, id, attrs} - - if space == "" { - v = v[1:] - } - - // This MUST be stable; and 1x1 with what javascript stringify is doing. - // Milage may vary; we may have to implement this manually depending on types and formatting. - return json.Marshal(v) -} diff --git a/internal/pkg/saved/encode.go b/internal/pkg/saved/encode.go deleted file mode 100644 index 995a31f8c..000000000 --- a/internal/pkg/saved/encode.go +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "bytes" - "encoding/json" - "fmt" - "reflect" - "strings" - "unicode" -) - -const ( - TagSaved = "saved" - TagAad = "aad" - TagEncrypt = "encrypt" - TagJSON = "json" -) - -type tagOptions string - -// From golang JSON code -func parseTag(tag string) (string, tagOptions) { - if idx := strings.Index(tag, ","); idx != -1 { - return tag[:idx], tagOptions(tag[idx+1:]) - } - return tag, tagOptions("") -} - -// From golang JSON code -func (o tagOptions) Contains(optionName string) bool { - if len(o) == 0 { - return false - } - s := string(o) - for s != "" { - var next string - i := strings.Index(s, ",") - if i >= 0 { - s, next = s[:i], s[i+1:] - } - if s == optionName { - return true - } - s = next - } - return false -} - -// From golang JSON code -func isValidTag(s string) bool { - if s == "" { - return false - } - for _, c := range s { - switch { - case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c): - // Backslash and quote chars are reserved, but - // otherwise any punctuation chars are allowed - // in a tag name. 
- case !unicode.IsLetter(c) && !unicode.IsDigit(c): - return false - } - } - return true -} - -// From golang JSON code -func isEmptyValue(v reflect.Value) bool { - switch v.Kind() { - case reflect.Array, reflect.Map, reflect.Slice, reflect.String: - return v.Len() == 0 - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return v.Float() == 0 - case reflect.Interface, reflect.Ptr: - return v.IsNil() - } - return false -} - -func deriveFieldKey(field reflect.StructField) (string, tagOptions) { - - // Use json tag if available, otherwise lowercase name - tag := field.Tag.Get(TagJSON) - key, opts := parseTag(tag) - - if !isValidTag(key) { - key = strings.ToLower(field.Name) - } - - var out bytes.Buffer - json.HTMLEscape(&out, []byte(key)) - - return out.String(), opts -} - -func gatherAAD(src interface{}) (Fields, Fields) { - t := reflect.TypeOf(src) - v := reflect.ValueOf(src) - - if t.Kind() == reflect.Ptr { - v = v.Elem() - t = reflect.TypeOf(v.Interface()) - } - - aad := make(Fields) - encrypt := make(Fields) - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - - // Get the field tag value - tag := field.Tag.Get(TagSaved) - - switch tag { - case TagAad: - key, _ := deriveFieldKey(field) - aad[key] = v.Field(i).Interface() - case TagEncrypt: - key, _ := deriveFieldKey(field) - encrypt[key] = v.Field(i).Interface() - case "", "-": - default: - panic(fmt.Sprintf("Unknown tag %s:\"%s\"", TagSaved, tag)) - } - } - - return aad, encrypt -} - -func isEncrypted(src interface{}) bool { - t := reflect.TypeOf(src) - - if t.Kind() == reflect.Ptr { - v := reflect.ValueOf(src).Elem().Interface() - t = reflect.TypeOf(v) - } - - for i := 0; i < t.NumField(); i++ { - field := t.Field(i) - - // Get the field tag value - tag := field.Tag.Get(TagSaved) - - switch tag { - case TagEncrypt: - return true - case TagAad, "", "-": - default: - panic(fmt.Sprintf("Unknown tag %s:\"%s\"", TagSaved, tag)) - } - } - - return false -} - -func (m *mgr) encode(ty, id, space string, src interface{}) ([]byte, error) { - if !isEncrypted(src) { - return json.Marshal(src) - } - - // scan for aad - aadSet, encryptSet := gatherAAD(src) - - aad, err := deriveAAD(ty, id, space, aadSet) - if err != nil { - return nil, err - } - - if err := encryptFields([]byte(m.key), aad, encryptSet); err != nil { - return nil, err - } - - fields := NewFields(src) - - for k, v := range encryptSet { - fields[k] = v - } - - return json.Marshal(fields) -} - -func (m *mgr) decode(ty, id, space string, data []byte, dst interface{}) error { - - if err := json.Unmarshal(data, dst); err != nil { - return err - } - - if !isEncrypted(dst) { - return nil - } - - fields := NewFields(dst) - - // scan for aad, this will return empty values, but we need the keys - aadSet, encryptSet := gatherAAD(dst) - - // Fix up aadSet with actual values retrieved from JSON - for k, _ := range aadSet { - aadSet[k] = fields[k] - } - - aad, err := deriveAAD(ty, id, space, aadSet) - if err != nil { - return err - } - - // Fix up encryptSet with actual values retrieved from JSON - for k, _ := range encryptSet { - encryptSet[k] = fields[k] - } - - if err := decryptFields([]byte(m.key), aad, encryptSet); err != nil { - return err - } - - // Overlay encrypted values on fields - for k, v := range encryptSet { - fields[k] = 
v - } - - return fields.MapInterface(dst) -} diff --git a/internal/pkg/saved/errors.go b/internal/pkg/saved/errors.go deleted file mode 100644 index 2344abd4a..000000000 --- a/internal/pkg/saved/errors.go +++ /dev/null @@ -1,23 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "errors" -) - -var ( - ErrNoType = errors.New("no type") - ErrRead = errors.New("read error") - ErrNoId = errors.New("no id") - ErrAttributeUnknown = errors.New("unknown attribute") - ErrAttributeType = errors.New("wrong attribute type") - ErrBadCipherText = errors.New("bad cipher text") - ErrNotEncrypted = errors.New("attribute not encrypted") - ErrMalformedSavedObj = errors.New("malformed saved object") - ErrMalformedIdentifier = errors.New("malformed saved object identifier") - ErrTypeMismatch = errors.New("type mismatch") - ErrSpaceMismatch = errors.New("namespace mismatch") -) diff --git a/internal/pkg/saved/fields.go b/internal/pkg/saved/fields.go deleted file mode 100644 index 2177aca3c..000000000 --- a/internal/pkg/saved/fields.go +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "github.com/mitchellh/mapstructure" - "reflect" -) - -type Fields map[string]interface{} - -func NewFields(src interface{}) Fields { - t := reflect.TypeOf(src) - v := reflect.ValueOf(src) - - if t.Kind() == reflect.Ptr { - v = v.Elem() - t = reflect.TypeOf(v.Interface()) - } - - nFields := v.NumField() - - m := make(Fields, nFields) - - for i := 0; i < nFields; i++ { - key, opts := deriveFieldKey(t.Field(i)) - - if key == "-" || (opts.Contains("omitempty") && isEmptyValue(v.Field(i))) { - continue - } - - m[key] = v.Field(i).Interface() - } - - return m -} - -func (f Fields) MapInterface(dst interface{}) error { - - config := &mapstructure.DecoderConfig{ - TagName: TagJSON, - Result: dst, - } - - decoder, err := mapstructure.NewDecoder(config) - if err != nil { - return err - } - - return decoder.Decode(f) -} diff --git a/internal/pkg/saved/id.go b/internal/pkg/saved/id.go deleted file mode 100644 index 5cb78eaa2..000000000 --- a/internal/pkg/saved/id.go +++ /dev/null @@ -1,63 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "fmt" - "strings" - - "github.com/gofrs/uuid" -) - -func genID(opts optionsT) (string, error) { - var id string - - if opts.Id != "" { - id = opts.Id - } else if u, err := uuid.NewV4(); err != nil { - return "", err - } else { - id = u.String() - } - - return id, nil -} - -func fmtID(ty, id, space string) string { - - if space != "" { - return fmt.Sprintf("%s:%s:%s", space, ty, id) - } - - return fmt.Sprintf("%s:%s", ty, id) -} - -type objectId struct { - id string - ns string - ty string -} - -// Deconstruct the ID. 
Expect namespace:type:id -func parseId(id string) (o objectId, err error) { - - tuple := strings.Split(id, ":") - - switch len(tuple) { - case 1: - o.id = tuple[0] - case 2: - o.ty = tuple[0] - o.id = tuple[1] - case 3: - o.ns = tuple[0] - o.ty = tuple[1] - o.id = tuple[2] - default: - err = ErrMalformedIdentifier - } - - return -} diff --git a/internal/pkg/saved/nonce.go b/internal/pkg/saved/nonce.go deleted file mode 100644 index b22124a3f..000000000 --- a/internal/pkg/saved/nonce.go +++ /dev/null @@ -1,39 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -import ( - "crypto/rand" -) - -const ( - ivLen = 12 - saltLen = 64 -) - -type nonceT struct { - buf []byte -} - -func newNonce() (nonceT, error) { - n := nonceT{ - buf: make([]byte, saltLen+ivLen), - } - - _, err := rand.Read(n.buf) - return n, err -} - -func (n nonceT) iv() []byte { - return n.buf[saltLen:] -} - -func (n nonceT) salt() []byte { - return n.buf[:saltLen] -} - -func (n nonceT) both() []byte { - return n.buf -} diff --git a/internal/pkg/saved/opts.go b/internal/pkg/saved/opts.go deleted file mode 100644 index 542c25662..000000000 --- a/internal/pkg/saved/opts.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package saved - -type optionsT struct { - Id string - Space string - Overwrite bool - Flush bool - Refresh bool - References []string -} - -func (c optionsT) Validate() error { - // TODO: validate Space - // TODO: validate Id - // TODO: validate References - return nil -} - -type Option func(*optionsT) - -func WithId(id string) Option { - return func(opt *optionsT) { - opt.Id = id - } -} - -func WithSpace(space string) Option { - return func(opt *optionsT) { - opt.Space = space - } -} - -func WithOverwrite() Option { - return func(opt *optionsT) { - opt.Overwrite = true - } -} - -func WithFlush() Option { - return func(opt *optionsT) { - opt.Flush = true - } -} - -func WithRefresh() Option { - return func(opt *optionsT) { - opt.Refresh = true - } -} - -func WithRefs(refs []string) Option { - return func(opt *optionsT) { - opt.References = refs - } -} - -func processOpts(options ...Option) (opts optionsT, err error) { - for _, optF := range options { - optF(&opts) - } - - err = opts.Validate() - return -} - -func validateType(ty string) error { - // TODO: check for invalidate runes - if ty == "" { - return ErrNoType - } - return nil -} - -func validateId(id string) error { - // TODO: check for invalidate runes - if id == "" { - return ErrNoId - } - return nil -} diff --git a/internal/pkg/saved/query.go b/internal/pkg/saved/query.go deleted file mode 100644 index 2b39a1094..000000000 --- a/internal/pkg/saved/query.go +++ /dev/null @@ -1,237 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package saved - -import ( - "fmt" - - "github.com/elastic/fleet-server/v7/internal/pkg/dsl" -) - -func NewQuery(ty string) *dsl.Node { - - root := dsl.NewRoot() - - // Require the type - root.Query().Bool().Must().Term("type", ty, nil) - - return root -} - -func ScopeField(ty, field string) string { - return fmt.Sprintf("%s.%s", ty, field) -} - -type ScopeFuncT func(field string) string - -func ScopeFunc(ty string) ScopeFuncT { - prefix := fmt.Sprintf("%s.", ty) - return func(field string) string { - return prefix + field - } -} - -/* - -1) saved.SearchNode(ctx, dsl.Node) -2) saved.SearchRaw(ctx, []byte) -3) fix policy to support N looksup in parallel -4) multisearch? how return hits? -5) strip out comments... -6) templatize call to get agent id at beginning of program - - - - q.Field(scopedField, value, boost) - -type treeMap map[string]*QueryN -type QueryN struct { - leaf interface{} - tree treeMap - array []*QueryN -} - - -func (q *QueryN) MarshalJSON() ([]byte, error) { - - switch { - case q.leaf != nil: - return json.Marshal(q.leaf) - case q.tree != nil: - return json.Marshal(q.tree) - case q.array != nil: - return json.Marshal(q.array) - } - - return []byte("null"), nil -} - -func (q *QueryN) Query() *QueryN { - if node, ok := q.tree["query"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{} - q.tree["query"] = node - return node -} - -func (q *QueryN) Bool() *QueryN { - if node, ok := q.tree["bool"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{} - q.tree["bool"] = node - return node -} - -func (q *QueryN) Must() *QueryN { - if node, ok := q.tree["must"]; ok { - return node - } - - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - node := &QueryN{ - array: make([]*QueryN, 0), - } - q.tree["must"] = node - return node -} - -func (q *QueryN) Term() *QueryN { - return q.makeChildNode("term") -} - -func (q *QueryN) makeChildNode(key string) *QueryN { - node := &QueryN{} - if q.array != nil { - tNode := QueryN{ - tree: map[string]*QueryN{key:node}, - } - q.array = append(q.array, &tNode) - - } else { - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - q.tree[key] = node - } - - return node -} - -func (q *QueryN) Field(field string, value interface{}, boost *float64) { - if q.tree == nil { - q.tree = make(map[string]*QueryN) - } - - var leaf interface{} - - switch boost { - case nil: - leaf = value - default: - leaf = &struct { - Value interface{} `json:"value"` - Boost *float64 `json:"boost,omitempty"` - } { - value, - boost, - } - } - - node := &QueryN{ - leaf: leaf, - } - - q.tree[field] = node -} - -func (q *QueryN) SavedField(ty, field string, value interface{}, boost *float64) { - scopedField := fmt.Sprintf("%s.%s", ty, field) - q.Field(scopedField, value, boost) -} - -type RangeOpt func(treeMap) - -func WithRangeGT(v interface{}) RangeOpt { - return func(tmap treeMap) { - tmap["gt"] = &QueryN{leaf:v} - } -} - -func (q *QueryN) Range(field string, opts ...RangeOpt) { - - fieldNode := &QueryN{ - tree: make(treeMap), - } - - for _, o := range opts { - o(fieldNode.tree) - } - - node := q.makeChildNode("range") - node.tree = map[string]*QueryN{ - field: fieldNode, - } -} - -func (q *QueryN) Size(sz uint64) { - if q.tree == nil { - q.tree = make(treeMap) - } - q.tree["size"] = &QueryN { - leaf: sz, - } -} - -func (q *QueryN) Sort() *QueryN { - n := q.makeChildNode("sort") - n.array = make([]*QueryN, 0) - return n -} - -type SortOrderT 
string
-
-const (
-	SortAscend SortOrderT = "asc"
-	SortDescend           = "desc"
-)
-
-func (q *QueryN) SortOrder(field string, order SortOrderT) {
-	if q.array == nil {
-		panic("Parent should be sort node")
-	}
-
-	defaultOrder := SortAscend
-	if field == "_score" {
-		defaultOrder = SortDescend
-	}
-
-	if order == defaultOrder {
-		q.array = append(q.array, &QueryN{leaf:field})
-	} else {
-		n := q.makeChildNode(field)
-		n.leaf = order
-	}
-}
-
-
-func (q *QueryN) SortOpt(field string, order SortOrder, opts ...SortOpt) {
-	// TODO
-}
-*/

From dc11ce057c1f3d6ad63b56f21e0ee4507bd16bb1 Mon Sep 17 00:00:00 2001
From: Aleksandr Maus
Date: Wed, 3 Mar 2021 16:05:53 -0500
Subject: [PATCH 020/240] Fix global checkpoint based monitoring (#116) (#117)

The index monitoring broke due to replacing the original .fleet-actions and
.fleet-policies indices with aliases. This change uses the global checkpoint
value of the first index received from stats, and returns an error if more
than one index is found for the alias for any reason.
---
 internal/pkg/monitor/global_checkpoint.go | 21 ++++++++++++++++++++-
 1 file changed, 20 insertions(+), 1 deletion(-)

diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go
index 31eb73ea9..89f0e8025 100644
--- a/internal/pkg/monitor/global_checkpoint.go
+++ b/internal/pkg/monitor/global_checkpoint.go
@@ -7,11 +7,16 @@ package monitor
 import (
 	"context"
 	"encoding/json"
+	"errors"
+	"fmt"
+
 	"github.com/elastic/fleet-server/v7/internal/pkg/es"
 	"github.com/elastic/go-elasticsearch/v8"
 )

+var ErrGlobalCheckpoint = errors.New("global checkpoint error")
+
 type shard struct {
 	SeqNo struct {
 		GlobalCheckpoint int64 `json:"global_checkpoint"`
@@ -49,7 +54,21 @@ func queryGlobalCheckpoint(ctx context.Context, es *elasticsearch.Client, index
 		return
 	}

-	if stats, ok := sres.IndexStats[index]; ok {
+	if len(sres.IndexStats) > 1 {
+		indices := make([]string, 0, len(sres.IndexStats))
+		for k := range sres.IndexStats {
+			indices = append(indices, k)
+		}
+		return seqno, fmt.Errorf("more than one indices found %v, %w", indices, ErrGlobalCheckpoint)
+	}
+
+	if len(sres.IndexStats) > 0 {
+		// Grab the first and only index stats
+		var stats indexStats
+		for _, stats = range sres.IndexStats {
+			break
+		}
+
 		if shards, ok := stats.Shards["0"]; ok {
 			if len(shards) > 0 {
 				seqno = shards[0].SeqNo.GlobalCheckpoint

From 03229846fe30e3a106748b33b69ea0db26657b19 Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Wed, 3 Mar 2021 17:17:40 -0500
Subject: [PATCH 021/240] Set agent active to false on unenroll ack.
 (#118) (#120)

(cherry picked from commit 006981514dd10045f02f006e024eecc8b9c6a4bd)
---
 cmd/fleet/handleAck.go       | 1 +
 internal/pkg/dl/constants.go | 1 +
 2 files changed, 2 insertions(+)

diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go
index bb85584be..bdf1d7ade 100644
--- a/cmd/fleet/handleAck.go
+++ b/cmd/fleet/handleAck.go
@@ -194,6 +194,7 @@ func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent)
 	updates := make([]bulk.BulkOp, 0, 1)
 	now := time.Now().UTC().Format(time.RFC3339)
 	fields := map[string]interface{}{
+		dl.FieldActive:       false,
 		dl.FieldUnenrolledAt: now,
 		dl.FieldUpdatedAt:    now,
 	}
diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go
index aa5cab75e..b585af688 100644
--- a/internal/pkg/dl/constants.go
+++ b/internal/pkg/dl/constants.go
@@ -31,6 +31,7 @@ const (
 	FieldPolicyRevisionIdx    = "policy_revision_idx"
 	FieldPolicyCoordinatorIdx = "policy_coordinator_idx"

+	FieldActive       = "active"
 	FieldUpdatedAt    = "updated_at"
 	FieldUnenrolledAt = "unenrolled_at"
 )

From f2841a599857eaedfa1163ea2c6dcc2f814cab9b Mon Sep 17 00:00:00 2001
From: Blake Rouse
Date: Fri, 5 Mar 2021 08:52:51 -0500
Subject: [PATCH 022/240] Add API key invalidation on unenroll ACK. (#124) (#125)

* Add API key invalidation on unenroll ACK.
* Fix import location.
* Fix test.

(cherry picked from commit dc762b5a39500afb68d969d43e6de83656ddbff7)
---
 cmd/fleet/handleAck.go               | 19 +++++++++++
 cmd/fleet/server_integration_test.go |  2 +-
 internal/pkg/apikey/auth.go          |  2 +-
 internal/pkg/apikey/create.go        |  2 +-
 internal/pkg/apikey/invalidate.go    | 50 ++++++++++++++++++++++++++++
 5 files changed, 72 insertions(+), 3 deletions(-)
 create mode 100644 internal/pkg/apikey/invalidate.go

diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go
index bdf1d7ade..8ce7e4b7e 100644
--- a/cmd/fleet/handleAck.go
+++ b/cmd/fleet/handleAck.go
@@ -13,6 +13,7 @@ import (
 	"strings"
 	"time"

+	"github.com/elastic/fleet-server/v7/internal/pkg/apikey"
 	"github.com/elastic/fleet-server/v7/internal/pkg/bulk"
 	"github.com/elastic/fleet-server/v7/internal/pkg/cache"
 	"github.com/elastic/fleet-server/v7/internal/pkg/dl"
@@ -191,6 +192,13 @@ func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Age
 }

 func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) error {
+	apiKeys := _getAPIKeyIDs(agent)
+	if len(apiKeys) > 0 {
+		if err := apikey.Invalidate(ctx, bulker.Client(), apiKeys...); err != nil {
+			return err
+		}
+	}
+
 	updates := make([]bulk.BulkOp, 0, 1)
 	now := time.Now().UTC().Format(time.RFC3339)
 	fields := map[string]interface{}{
@@ -214,3 +222,14 @@ func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent)
 	return bulker.MUpdate(ctx, updates, bulk.WithRefresh())
 }
+
+func _getAPIKeyIDs(agent *model.Agent) []string {
+	keys := make([]string, 0, 1)
+	if agent.AccessApiKeyId != "" {
+		keys = append(keys, agent.AccessApiKeyId)
+	}
+	if agent.DefaultApiKeyId != "" {
+		keys = append(keys, agent.DefaultApiKeyId)
+	}
+	return keys
+}
diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go
index 5f9d726c9..e36527910 100644
--- a/cmd/fleet/server_integration_test.go
+++ b/cmd/fleet/server_integration_test.go
@@ -181,7 +181,7 @@ func TestServerUnauthorized(t *testing.T) {

 	// Unauthorized, expecting error from /_security/_authenticate
 	t.Run("unauthorized", func(t *testing.T) {
-		const expectedErrResponsePrefix = `Fail Auth: [401 Unauthorized]`
+		const expectedErrResponsePrefix = `fail Auth: [401 Unauthorized]`
 		for _, u := range agenturls {
 			req, err := http.NewRequest("POST", u, bytes.NewBuffer([]byte("{}")))
 			require.NoError(t, err)
diff --git a/internal/pkg/apikey/auth.go b/internal/pkg/apikey/auth.go
index e097bf185..0a7675770 100644
--- a/internal/pkg/apikey/auth.go
+++ b/internal/pkg/apikey/auth.go
@@ -48,7 +48,7 @@ func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*Se
 	}

 	if res.IsError() {
-		return nil, fmt.Errorf("Fail Auth: %s", res.String())
+		return nil, fmt.Errorf("fail Auth: %s", res.String())
 	}

 	var info SecurityInfo
diff --git a/internal/pkg/apikey/create.go b/internal/pkg/apikey/create.go
index dc244871d..35d4b66b2 100644
--- a/internal/pkg/apikey/create.go
+++ b/internal/pkg/apikey/create.go
@@ -48,7 +48,7 @@ func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string,
 	defer res.Body.Close()

 	if res.IsError() {
-		return nil, fmt.Errorf("Fail CreateAPIKey: %s", res.String())
+		return nil, fmt.Errorf("fail CreateAPIKey: %s", res.String())
 	}

 	type APIKeyResponse struct {
diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go
new file mode 100644
index 000000000..938e2bd52
--- /dev/null
+++ b/internal/pkg/apikey/invalidate.go
@@ -0,0 +1,50 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package apikey
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+
+	"github.com/elastic/go-elasticsearch/v8"
+	"github.com/elastic/go-elasticsearch/v8/esapi"
+)
+
+// Invalidate invalidates the provided API keys by ID.
+func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string) error {
+
+	payload := struct {
+		IDs []string `json:"ids,omitempty"`
+	}{
+		ids,
+	}
+
+	body, err := json.Marshal(&payload)
+	if err != nil {
+		return err
+	}
+
+	opts := []func(*esapi.SecurityInvalidateAPIKeyRequest){
+		client.Security.InvalidateAPIKey.WithContext(ctx),
+	}
+
+	res, err := client.Security.InvalidateAPIKey(
+		bytes.NewReader(body),
+		opts...,
+	)
+
+	if err != nil {
+		return err
+	}
+
+	defer res.Body.Close()
+
+	if res.IsError() {
+		return fmt.Errorf("fail InvalidateAPIKey: %s", res.String())
+	}
+	return nil
+}

From 0fdb2b197dc255e09ba32ee9ed898bdad192ae21 Mon Sep 17 00:00:00 2001
From: Sean Cunningham
Date: Thu, 4 Mar 2021 09:09:20 -0500
Subject: [PATCH 023/240] Basic metrics framework leveraging beats logic. (#113)

---
 NOTICE.txt                         | 667 +++++++++++++++--------------
 cmd/fleet/main.go                  |   9 +
 cmd/fleet/metrics.go               |  44 ++
 cmd/fleet/server.go                |  30 +-
 go.mod                             |   4 +-
 go.sum                             |  85 ++--
 internal/pkg/config/config.go      |   2 +
 internal/pkg/config/config_test.go |  16 +
 internal/pkg/config/http.go        |  23 +
 internal/pkg/logger/zapStub.go     |  74 ++++
 10 files changed, 603 insertions(+), 351 deletions(-)
 create mode 100644 cmd/fleet/metrics.go
 create mode 100644 internal/pkg/config/http.go
 create mode 100644 internal/pkg/logger/zapStub.go

diff --git a/NOTICE.txt b/NOTICE.txt
index b7f8e4f8a..2a8ddb48e 100644
--- a/NOTICE.txt
+++ b/NOTICE.txt
@@ -228,11 +228,11 @@ Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.
-------------------------------------------------------------------------------- Dependency : github.com/elastic/beats/v7 -Version: v7.10.0 +Version: v7.11.1 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/beats/v7@v7.10.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/beats/v7@v7.11.1/LICENSE.txt: Source code in this repository is variously licensed under the Apache License Version 2.0, an Apache compatible license, or the Elastic License. Outside of @@ -1755,6 +1755,39 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/pkg/errors +Version: v0.9.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/rs/xid Version: v1.2.1 @@ -2030,6 +2063,35 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : go.uber.org/zap +Version: v1.14.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: + +Copyright (c) 2016-2017 Uber Technologies, Inc. 
+ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/sync Version: v0.0.0-20200625203802-6e8e738ad208 @@ -4339,11 +4401,11 @@ THE SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/Azure/go-autorest/autorest -Version: v0.9.4 +Version: v0.9.6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest@v0.9.4/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest@v0.9.6/LICENSE: Apache License @@ -4540,11 +4602,11 @@ Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/auto -------------------------------------------------------------------------------- Dependency : github.com/Azure/go-autorest/autorest/adal -Version: v0.8.1 +Version: v0.8.2 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest/adal@v0.8.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/!azure/go-autorest/autorest/adal@v0.8.2/LICENSE: Apache License @@ -13619,6 +13681,37 @@ Contents of probable licence file $GOMODCACHE/github.com/docker/spdystream@v0.0. limitations under the License. 
+-------------------------------------------------------------------------------- +Dependency : github.com/docopt/docopt-go +Version: v0.0.0-20180111231733-ee0de3bc6815 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/docopt/docopt-go@v0.0.0-20180111231733-ee0de3bc6815/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2013 Keith Batten +Copyright (c) 2016 David Irvine + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/andrewkroh/goja Version: v0.0.0-20190128172624-dd2ac4456e20 @@ -14316,11 +14409,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-concert@v0.0 -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-libaudit/v2 -Version: v2.0.2 +Version: v2.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.0.2/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-libaudit/v2@v2.1.0/LICENSE.txt: Apache License @@ -16069,11 +16162,11 @@ Contents of probable licence file $GOMODCACHE/github.com/elastic/go-windows@v1.0 -------------------------------------------------------------------------------- Dependency : github.com/elastic/gosigar -Version: v0.10.6-0.20200715000138-f115143bb233 +Version: v0.13.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.10.6-0.20200715000138-f115143bb233/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/gosigar@v0.13.0/LICENSE: Apache License Version 2.0, January 2004 @@ -16771,11 +16864,11 @@ Contents of probable licence file $GOMODCACHE/github.com/envoyproxy/protoc-gen-v -------------------------------------------------------------------------------- Dependency : github.com/evanphx/json-patch -Version: v4.2.0+incompatible +Version: v4.9.0+incompatible Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.2.0+incompatible/LICENSE: 
+Contents of probable licence file $GOMODCACHE/github.com/evanphx/json-patch@v4.9.0+incompatible/LICENSE: Copyright (c) 2014, Evan Phoenix All rights reserved. @@ -16785,7 +16878,7 @@ modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. -* Redistributions in binary form must reproduce the above copyright notice +* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the Evan Phoenix nor the names of its contributors @@ -16941,14 +17034,14 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/fsnotify/fsnotify -Version: v1.4.7 +Version: v1.4.9 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/fsnotify/fsnotify@v1.4.7/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/fsnotify/fsnotify@v1.4.9/LICENSE: Copyright (c) 2012 The Go Authors. All rights reserved. -Copyright (c) 2012 fsnotify Authors. All rights reserved. +Copyright (c) 2012-2019 fsnotify Authors. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -17325,11 +17418,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/go-logr/logr -Version: v0.1.0 +Version: v0.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/go-logr/logr@v0.2.0/LICENSE: Apache License Version 2.0, January 2004 @@ -22166,11 +22259,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : github.com/googleapis/gnostic -Version: v0.3.1-0.20190624222214-25d8b0b66985 +Version: v0.4.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.3.1-0.20190624222214-25d8b0b66985/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: Apache License @@ -22377,207 +22470,6 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.3 --------------------------------------------------------------------------------- -Dependency : github.com/gophercloud/gophercloud -Version: v0.1.0 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gophercloud/gophercloud@v0.1.0/LICENSE: - -Copyright 2012-2013 Rackspace, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use -this file except in compliance with the License. 
You may obtain a copy of the -License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed -under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR -CONDITIONS OF ANY KIND, either express or implied. See the License for the -specific language governing permissions and limitations under the License. - ------- - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." 
- - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. 
- - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - -------------------------------------------------------------------------------- Dependency : github.com/gopherjs/gopherjs Version: v0.0.0-20181017120253-0766667cb4d1 @@ -22744,11 +22636,11 @@ SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : github.com/h2non/filetype -Version: v1.0.12 +Version: v1.1.1-0.20201130172452-f60988ab73d5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.0.12/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: The MIT License @@ -25493,13 +25385,224 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/jonboulle/clockwork +Version: v0.2.2 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/jonboulle/clockwork@v0.2.2/LICENSE: + +Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + -------------------------------------------------------------------------------- Dependency : github.com/josephspurrier/goversioninfo -Version: v0.0.0-20200309025242-14b0ab84c6ca +Version: v0.0.0-20190209210621-63e6d1acd3dd Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/josephspurrier/goversioninfo@v0.0.0-20200309025242-14b0ab84c6ca/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/josephspurrier/goversioninfo@v0.0.0-20190209210621-63e6d1acd3dd/LICENSE: The MIT License (MIT) @@ -25557,11 +25660,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/json-iterator/go -Version: v1.1.8 +Version: v1.1.10 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.8/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/json-iterator/go@v1.1.10/LICENSE: MIT License @@ -29195,39 +29298,6 @@ The above copyright notice and this permission notice shall be included in all c THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/pkg/errors -Version: v0.9.1 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: - -Copyright (c) 2015, Dave Cheney -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : github.com/pmezard/go-difflib Version: v1.0.0 @@ -35124,35 +35194,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : go.uber.org/zap -Version: v1.14.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: - -Copyright (c) 2016-2017 Uber Technologies, Inc. - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/crypto Version: v0.0.0-20200622213623-75b288015ac9 @@ -35488,11 +35529,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : golang.org/x/text -Version: v0.3.2 +Version: v0.3.3 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.2/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/text@v0.3.3/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. @@ -35848,11 +35889,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/appengine@v1.6.5 -------------------------------------------------------------------------------- Dependency : google.golang.org/genproto -Version: v0.0.0-20191230161307-f3c370f40bfb +Version: v0.0.0-20200526211855-cb27e3aa2013 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20191230161307-f3c370f40bfb/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/genproto@v0.0.0-20200526211855-cb27e3aa2013/LICENSE: Apache License @@ -36272,11 +36313,11 @@ Contents of probable licence file $GOMODCACHE/google.golang.org/grpc@v1.29.1/LIC -------------------------------------------------------------------------------- Dependency : google.golang.org/protobuf -Version: v1.23.0 +Version: v1.24.0 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.23.0/LICENSE: +Contents of probable licence file $GOMODCACHE/google.golang.org/protobuf@v1.24.0/LICENSE: Copyright (c) 2018 The Go Authors. All rights reserved. @@ -38006,11 +38047,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- Dependency : k8s.io/api -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.19.4/LICENSE: Apache License @@ -38218,11 +38259,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/api@v0.18.3/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/apimachinery -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.19.4/LICENSE: Apache License @@ -38430,11 +38471,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/apimachinery@v0.18.3/LICENS -------------------------------------------------------------------------------- Dependency : k8s.io/client-go -Version: v0.18.3 +Version: v0.19.4 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.18.3/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.19.4/LICENSE: Apache License @@ -38642,11 +38683,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/client-go@v0.18.3/LICENSE: -------------------------------------------------------------------------------- Dependency : k8s.io/gengo -Version: v0.0.0-20190128074634-0689ccc1d7d6 +Version: v0.0.0-20200413195148-3a45101e95ac Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/gengo@v0.0.0-20190128074634-0689ccc1d7d6/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/gengo@v0.0.0-20200413195148-3a45101e95ac/LICENSE: Apache License @@ -38853,12 +38894,12 @@ Contents of probable licence file $GOMODCACHE/k8s.io/gengo@v0.0.0-20190128074634 -------------------------------------------------------------------------------- -Dependency : k8s.io/klog -Version: v1.0.0 +Dependency : k8s.io/klog/v2 +Version: v2.2.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/klog@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/klog/v2@v2.2.0/LICENSE: Apache License Version 2.0, January 2004 @@ -39055,11 +39096,11 @@ third-party archives. 
-------------------------------------------------------------------------------- Dependency : k8s.io/kube-openapi -Version: v0.0.0-20200410145947-61e04a5be9a6 +Version: v0.0.0-20200805222855-6aeccd4b50c6 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20200410145947-61e04a5be9a6/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-20200805222855-6aeccd4b50c6/LICENSE: Apache License @@ -39267,11 +39308,11 @@ Contents of probable licence file $GOMODCACHE/k8s.io/kube-openapi@v0.0.0-2020041 -------------------------------------------------------------------------------- Dependency : k8s.io/utils -Version: v0.0.0-20200324210504-a9aa75ae1b89 +Version: v0.0.0-20200729134348-d5654de09c73 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20200324210504-a9aa75ae1b89/LICENSE: +Contents of probable licence file $GOMODCACHE/k8s.io/utils@v0.0.0-20200729134348-d5654de09c73/LICENSE: Apache License @@ -39515,12 +39556,12 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : sigs.k8s.io/structured-merge-diff/v3 -Version: v3.0.0 +Dependency : sigs.k8s.io/structured-merge-diff/v4 +Version: v4.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v3@v3.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/sigs.k8s.io/structured-merge-diff/v4@v4.0.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 15fe89984..56798ff09 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -459,6 +459,15 @@ func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error } func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err error) { + + metricsServer, err := f.initMetrics(ctx, cfg) + switch { + case err != nil: + return err + case metricsServer != nil: + defer metricsServer.Stop() + } + // Bulker is started in its own context and managed inside of this function. This is done so // when the `ctx` is cancelled every worker using the bulker can get everything written on // shutdown before the bulker is then cancelled. diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go new file mode 100644 index 000000000..70cf439fd --- /dev/null +++ b/cmd/fleet/metrics.go @@ -0,0 +1,44 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "context" + "github.com/pkg/errors" + + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/elastic/beats/v7/libbeat/api" + "github.com/elastic/beats/v7/libbeat/cmd/instance/metrics" + "github.com/elastic/beats/v7/libbeat/common" + "github.com/elastic/beats/v7/libbeat/monitoring" +) + +func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { + registry := monitoring.GetNamespace("info").GetRegistry() + monitoring.NewString(registry, "version").Set(f.version) + monitoring.NewString(registry, "name").Set("fleet-server") + metrics.SetupMetrics("fleet-server") + + if !cfg.HTTP.Enabled { + return nil, nil + } + + // Start local api server; largely for metics. + zapStub := logger.NewZapStub("fleet-metrics") + cfgStub, err := common.NewConfigFrom(&cfg.HTTP) + if err != nil { + return nil, err + } + s, err := api.NewWithDefaultRoutes(zapStub, cfgStub, monitoring.GetNamespace) + if err != nil { + err = errors.Wrap(err, "could not start the HTTP server for the API") + } else { + s.Start() + } + + return s, err +} diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 3e52dab36..3a12f05cc 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -15,16 +15,36 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/rate" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" + "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" ) +var ( + registry *monitoring.Registry + cntHttpNew *monitoring.Uint + cntHttpClose *monitoring.Uint +) + +func init() { + registry = monitoring.Default.NewRegistry("http_server") + cntHttpNew = monitoring.NewUint(registry, "tcp_open") + cntHttpClose = monitoring.NewUint(registry, "tcp_close") +} + func diagConn(c net.Conn, s http.ConnState) { log.Trace(). Str("local", c.LocalAddr().String()). Str("remote", c.RemoteAddr().String()). Str("state", s.String()). Msg("connection state change") + + switch s { + case http.StateNew: + cntHttpNew.Inc() + case http.StateClosed: + cntHttpClose.Inc() + } } func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Server) error { @@ -66,7 +86,9 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve } }() - ln, err := net.Listen("tcp", addr) + var listenCfg net.ListenConfig + + ln, err := listenCfg.Listen(ctx, "tcp", addr) if err != nil { return err } @@ -97,7 +119,11 @@ func wrapRateLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) rateLimitInterval := cfg.RateLimitInterval if rateLimitInterval != 0 { - log.Info().Dur("interval", rateLimitInterval).Int("burst", rateLimitBurst).Msg("Server rate limiter installed") + log.Info(). + Dur("interval", rateLimitInterval). + Int("burst", rateLimitBurst). 
+ Msg("Server rate limiter installed") + ln = rate.NewRateListener(ctx, ln, rateLimitBurst, rateLimitInterval) } else { log.Info().Msg("server connection rate limiter disabled") diff --git a/go.mod b/go.mod index a7cb815b1..7d3f74a01 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.15 require ( github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f github.com/dgraph-io/ristretto v0.0.3 - github.com/elastic/beats/v7 v7.10.0 + github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 github.com/elastic/go-ucfg v0.8.3 @@ -14,10 +14,12 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 + github.com/pkg/errors v0.9.1 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.19.0 github.com/spf13/cobra v0.0.5 github.com/stretchr/testify v1.6.1 + go.uber.org/zap v1.14.0 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ) diff --git a/go.sum b/go.sum index 19430dbad..35d4032ec 100644 --- a/go.sum +++ b/go.sum @@ -29,10 +29,11 @@ github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= -github.com/Azure/go-autorest/autorest v0.9.4/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= @@ -49,6 +50,7 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= +github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= @@ -71,6 +73,7 @@ 
github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQY github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aws/aws-lambda-go v1.6.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= @@ -106,6 +109,7 @@ github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7 github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= @@ -128,20 +132,22 @@ github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUn github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= +github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a h1:pNE/kl/UUSqAi7IiyPjnaIbYBRaEORJY8/RCK9Tx39c= github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= +github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/eclipse/paho.mqtt.golang 
v1.2.1-0.20200121105743-0d940dd29fd2/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= -github.com/elastic/beats/v7 v7.10.0 h1:MpXREz0PzwuHpJnNAHcjmRoQRfVUnJFJvYQdzRjBZKg= -github.com/elastic/beats/v7 v7.10.0/go.mod h1:GV6Gy80eRYpJ4Dk4MZcQFMxXbmOnWrj9ZPK5UhwCkhU= +github.com/elastic/beats/v7 v7.11.1 h1:eYJRKc/mA6rhQNujUV9lUADQ0S9SZvI5d782BnNvgFY= +github.com/elastic/beats/v7 v7.11.1/go.mod h1:2gJ+JvWjTYuMA37chVSfsolz7Z2ca+gL39HpmSLO+z8= github.com/elastic/ecs v1.6.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a h1:2NHgf1RUw+f240lpTnLrCp1aBNvq2wDi0E1A423/S1k= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= @@ -149,7 +155,7 @@ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdb github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 h1:0ZrGQPGY7QCySD/14ht2UDggGKmqgLouMd5FFimcguA= github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= -github.com/elastic/go-libaudit/v2 v2.0.2/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= +github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= github.com/elastic/go-lookslike v0.3.0/go.mod h1:AhH+rdJux5RlVjs+6ej4jkvYyoNRkj2crxmqeHlj3hA= @@ -167,7 +173,8 @@ github.com/elastic/go-ucfg v0.8.3/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+F github.com/elastic/go-windows v1.0.0/go.mod h1:TsU0Nrp7/y3+VwE82FoZF8gC/XFg/Elz6CcloAxnPgU= github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUtJm0= github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= -github.com/elastic/gosigar v0.10.6-0.20200715000138-f115143bb233/go.mod h1:cdorVVzy1fhmEqmtgqkoE3bYtCfSCkVyjTyCIo22xvs= +github.com/elastic/gosigar v0.13.0 h1:EIeuQcLPKia759s6mlVztlxUyKiKYHo6y6kOODOLO7A= +github.com/elastic/gosigar v0.13.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec/go.mod h1:X690XXMxlbtN8c7xcpsENKNlbj8VClCZ2hwSOhSyNmE= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= @@ -175,11 +182,13 @@ github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymF github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= +github.com/fatih/color v1.5.0 h1:vBh+kQp8lg9XPr56u1CPrWjFXtdphMoGWVHr9/1c+A0= 
github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= @@ -189,6 +198,7 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2 github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= @@ -213,13 +223,11 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= -github.com/golang/protobuf v0.0.0-20161109072736-4bd1920723d7/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -229,6 +237,7 @@ github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:x github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf 
v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -259,17 +268,14 @@ github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gnostic v0.0.0-20170729233727-0c5108395e2d/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.1.0/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/googleapis/gnostic v0.3.1-0.20190624222214-25d8b0b66985/go.mod h1:sJBsCZ4ayReDTBIg8b9dl28c5xFWyhBTVRp3pOg5EKY= -github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8= +github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/h2non/filetype v1.0.12/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= +github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= @@ -305,12 +311,13 @@ github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xl github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= -github.com/josephspurrier/goversioninfo v0.0.0-20200309025242-14b0ab84c6ca/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= +github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= 
github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= @@ -346,9 +353,11 @@ github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7 github.com/markbates/pkger v0.17.0 h1:RFfyBPufP2V6cddUyyEVSHBpaAnM1WzaMNyqomeT+iY= github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= +github.com/mattn/go-colorable v0.0.8 h1:KatiXbcoFpoKmM5pL0yhug+tx/POfZO+0aVsuGhUhgo= github.com/mattn/go-colorable v0.0.8/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= +github.com/mattn/go-isatty v0.0.2 h1:F+DnWktyadxnOrohKLNUC9/GjFii5RJgY4GFG6ilggw= github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= @@ -431,6 +440,7 @@ github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6b github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY= github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= @@ -492,12 +502,14 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= +go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b h1:Sf+V3eV91ZuXjF3824SABFgXU+z4ZEuIX5ikDvt2lCE= go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8= go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY= go.elastic.co/ecszap v0.3.0 h1:Zo/Y4sJLqbWDlqCHI4F4Lzeg0Fs4+n5ldVis4h9xV8w= 
go.elastic.co/ecszap v0.3.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= go.elastic.co/fastjson v1.0.0/go.mod h1:PmeUOMMtLHQr9ZS9J9owrAVg0FkaZDRZJEFTTGHtchs= +go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.elastic.co/go-licence-detector v0.4.0 h1:it5dP+6LPxLsosdhtbAqk/zJQxzS0QSSpdNkKVuwKMs= go.elastic.co/go-licence-detector v0.4.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= @@ -516,7 +528,6 @@ go.uber.org/zap v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190211182817-74369b46fc67/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -525,8 +536,6 @@ golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200204104054-c9f3fb736b72/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200220183623-bac4c82f6975/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200510223506-06a226fb4e37/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -554,7 +563,6 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0 h1:RM4zey1++hCTbCVQfnWeKs9/IEsaBLA8vTkd0WVtmH4= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/net v0.0.0-20170114055629-f2499483f923/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -573,13 +581,14 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net 
v0.0.0-20191004110552-13f9640d40b9/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191021144547-ec77196f6094/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191112182307-2180aed22343/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/net v0.0.0-20200822124328-c89045814202 h1:VvcQYSHwXgi7W+TpUR6A9g6Up98WAHf3f/ulnJ62IyA= golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -599,14 +608,13 @@ golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 h1:qwRHBd0NqMbJxfbotnDhm2ByMI1Shq4Y6oRJo21SGJA= golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20170830134202-bb24a47a89ea/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180810173357-98c5dad5d1a0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190204203706-41f3e6584952/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190209173611-3b5209105503/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190405154228-4b34438f7a67/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -621,7 +629,7 @@ golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190801041406-cbf593c0f2f3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191022100944-742c48ecaeb7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys 
v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191025021431-6c3a3bfe00ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191112214154-59a1497f0cea/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -631,13 +639,15 @@ golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -701,6 +711,8 @@ google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBr google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -708,6 +720,7 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1 h1:EC2SB8S04d2r73uptxphDSUG+kTKVgjRPF+N3xpxRB4= 
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= @@ -715,8 +728,12 @@ google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -759,17 +776,15 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= -k8s.io/api v0.18.3/go.mod h1:UOaMwERbqJMfeeeHc8XJKawj4P9TgDRnViIqqBeH2QA= -k8s.io/apimachinery v0.18.3/go.mod h1:OaXp26zu/5J7p0f92ASynJa1pZo06YlV9fG7BoWbCko= -k8s.io/client-go v0.18.3/go.mod h1:4a/dpQEvzAhT1BbuWW09qvIaGw6Gbu1gZYiQZIi1DMw= -k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= -k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v0.3.0/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= -k8s.io/klog v1.0.0/go.mod h1:4Bi6QPql/J/LkTDqv7R/cd3hPo4k2DG6Ptcz060Ez5I= -k8s.io/kube-openapi v0.0.0-20200410145947-61e04a5be9a6/go.mod h1:GRQhZsXIAJ1xR0C9bd8UpWHZ5plfAS9fzPjJuQ6JL3E= -k8s.io/utils v0.0.0-20200324210504-a9aa75ae1b89/go.mod h1:sZAwmy6armz5eXlNoLmJcl4F1QuKu7sr+mFQ0byX7Ew= +k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= +k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= -sigs.k8s.io/structured-merge-diff/v3 
v3.0.0-20200116222232-67a7b8c61874/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= -sigs.k8s.io/structured-merge-diff/v3 v3.0.0/go.mod h1:PlARxl6Hbt/+BC80dRLi1qAmnMqwqDg62YvvVkZjemw= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 956457014..2f636792b 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -26,12 +26,14 @@ type Config struct { Output Output `config:"output"` Inputs []Input `config:"inputs"` Logging Logging `config:"logging"` + HTTP HTTP `config:"http"` } // InitDefaults initializes the defaults for the configuration. func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() + c.HTTP.InitDefaults() } // Validate ensures that the configuration is valid. diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index de2e6cfe6..49996ba17 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -66,6 +66,10 @@ func TestConfig(t *testing.T) { ToFiles: true, Files: nil, }, + HTTP: HTTP{ + Host: kDefaultHTTPHost, + Port: kDefaultHTTPPort, + }, }, }, "fleet-logging": { @@ -114,6 +118,10 @@ func TestConfig(t *testing.T) { ToFiles: true, Files: nil, }, + HTTP: HTTP{ + Host: kDefaultHTTPHost, + Port: kDefaultHTTPPort, + }, }, }, "input": { @@ -160,6 +168,10 @@ func TestConfig(t *testing.T) { ToFiles: true, Files: nil, }, + HTTP: HTTP{ + Host: kDefaultHTTPHost, + Port: kDefaultHTTPPort, + }, }, }, "input-config": { @@ -206,6 +218,10 @@ func TestConfig(t *testing.T) { ToFiles: true, Files: nil, }, + HTTP: HTTP{ + Host: kDefaultHTTPHost, + Port: kDefaultHTTPPort, + }, }, }, "bad-input": { diff --git a/internal/pkg/config/http.go b/internal/pkg/config/http.go new file mode 100644 index 000000000..c85aca0fc --- /dev/null +++ b/internal/pkg/config/http.go @@ -0,0 +1,23 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +const kDefaultHTTPHost = "localhost" +const kDefaultHTTPPort = 5066 + +// Http is the configuration for the API endpoint. +type HTTP struct { + Enabled bool `config:"enabled"` + Host string `config:"host"` + Port int `config:"port"` + User string `config:"named_pipe.user"` + SecurityDescriptor string `config:"named_pipe.security_descriptor"` +} + +func (h *HTTP) InitDefaults() { + h.Enabled = false + h.Host = kDefaultHTTPHost + h.Port = kDefaultHTTPPort +} diff --git a/internal/pkg/logger/zapStub.go b/internal/pkg/logger/zapStub.go new file mode 100644 index 000000000..b64d02785 --- /dev/null +++ b/internal/pkg/logger/zapStub.go @@ -0,0 +1,74 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package logger + +import ( + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/rs/zerolog" + "github.com/rs/zerolog/log" + "go.uber.org/zap" + "go.uber.org/zap/zapcore" +) + +func encoderConfig() zapcore.EncoderConfig { + return zapcore.EncoderConfig{ + MessageKey: "msg", + LevelKey: "level", + NameKey: "name", + TimeKey: "ts", + CallerKey: "caller", + StacktraceKey: "stacktrace", + LineEnding: "\n", + EncodeTime: zapcore.EpochTimeEncoder, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } +} + +type zapStub struct { +} + +func (z zapStub) Enabled(zapLevel zapcore.Level) bool { + + zeroLevel := log.Logger.GetLevel() + + switch zapLevel { + case zapcore.DebugLevel: + return zeroLevel == zerolog.DebugLevel + case zapcore.InfoLevel: + return zeroLevel <= zerolog.InfoLevel + case zapcore.WarnLevel: + return zeroLevel <= zerolog.WarnLevel + case zapcore.ErrorLevel: + return zeroLevel <= zerolog.ErrorLevel + case zapcore.FatalLevel: + return zeroLevel <= zerolog.FatalLevel + case zapcore.DPanicLevel, zapcore.PanicLevel: + return zeroLevel <= zerolog.PanicLevel + } + + return true +} + +func (z zapStub) Sync() error { + return nil +} + +func (z zapStub) Write(p []byte) (n int, err error) { + log.Log().RawJSON("zap", p).Msg("") + return 0, nil +} + +func NewZapStub(selector string) *logp.Logger { + + wrapF := func(zapcore.Core) zapcore.Core { + enc := zapcore.NewJSONEncoder(encoderConfig()) + stub := zapStub{} + return zapcore.NewCore(enc, stub, stub) + } + + return logp.NewLogger(selector, zap.WrapCore(wrapF)) +} From 3a2179f2169794e593995ffe4edae6c85b6262f9 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Fri, 5 Mar 2021 12:29:58 -0500 Subject: [PATCH 024/240] =?UTF-8?q?Add=20optional=20hard=20connection=20li?= =?UTF-8?q?mit=20to=20the=20server.=20=20No=20more=20than=20N=20con?= =?UTF-8?q?=E2=80=A6=20(#122)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Add optional maximum connection limit to the server. No more than N connections will be active at any time. * And licenses * And notice --- NOTICE.txt | 74 ++++++++++++++++++------------------ cmd/fleet/server.go | 18 +++++++++ go.mod | 1 + internal/pkg/config/input.go | 2 + 4 files changed, 58 insertions(+), 37 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 2a8ddb48e..03f71096c 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2092,6 +2092,43 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.0.0-20200822124328-c89045814202 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : golang.org/x/sync Version: v0.0.0-20200625203802-6e8e738ad208 @@ -35416,43 +35453,6 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/net -Version: v0.0.0-20200822124328-c89045814202 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - -------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 Version: v0.0.0-20200107190931-bf48bf16ab8d diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 3a12f05cc..eecf93233 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" + "golang.org/x/net/netutil" ) var ( @@ -107,6 +108,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve } ln = wrapRateLimitter(ctx, ln, cfg) + ln = wrapConnLimitter(ctx, ln, cfg) if err := server.Serve(ln); err != nil && err != context.Canceled { return err } @@ -132,6 +134,22 @@ func wrapRateLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) return ln } +func wrapConnLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) net.Listener { + hardLimit := cfg.MaxConnections + + if hardLimit != 0 { + log.Info(). + Int("hardConnLimit", hardLimit). + Msg("server hard connection limiter installed") + + ln = netutil.LimitListener(ln, hardLimit) + } else { + log.Info().Msg("server hard connection limiter disabled") + } + + return ln +} + type stubLogger struct { } diff --git a/go.mod b/go.mod index 7d3f74a01..73269b7c3 100644 --- a/go.mod +++ b/go.mod @@ -20,6 +20,7 @@ require ( github.com/spf13/cobra v0.0.5 github.com/stretchr/testify v1.6.1 go.uber.org/zap v1.14.0 + golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ) diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 391bfc48c..6588bfdb3 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -57,6 +57,7 @@ type Server struct { MaxHeaderByteSize int `config:"max_header_byte_size"` RateLimitBurst int `config:"rate_limit_burst"` RateLimitInterval time.Duration `config:"rate_limit_interval"` + MaxConnections int `config:"max_connections"` MaxEnrollPending int64 `config:"max_enroll_pending"` Profile ServerProfile `config:"profile"` } @@ -69,6 +70,7 @@ func (c *Server) InitDefaults() { c.MaxHeaderByteSize = 8192 // 8k c.RateLimitBurst = 1024 c.RateLimitInterval = 5 * time.Millisecond + c.MaxConnections = 0 // no limit c.MaxEnrollPending = 64 c.Profile.InitDefaults() } From 6595e035727e935dbe5ab0af32c907c3d033df40 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Fri, 5 Mar 2021 14:40:42 -0500 Subject: [PATCH 025/240] Enable trace level logging (#112) (#127) --- internal/pkg/config/config_test.go | 2 +- internal/pkg/config/fleet.go | 7 +++++-- internal/pkg/config/testdata/bad-logging.yml | 2 +- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 49996ba17..d563a4b0b 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -231,7 +231,7 @@ func TestConfig(t *testing.T) { err: "only 1 fleet-server input can be defined", }, "bad-logging": { - err: "invalid log level; must be one of: debug, info, warning, error", + err: "invalid log level; must be one of: trace, debug, info, warning, error", }, "bad-output": { err: "can only contain elasticsearch key", diff --git a/internal/pkg/config/fleet.go b/internal/pkg/config/fleet.go index 7f17afbe3..5cb12d881 100644 --- a/internal/pkg/config/fleet.go +++ b/internal/pkg/config/fleet.go @@ -6,8 +6,9 @@ package config import ( 
"fmt" - "github.com/rs/zerolog" "strings" + + "github.com/rs/zerolog" ) // AgentLogging is the log level set on the Agent. @@ -58,6 +59,8 @@ func strToLevel(s string) (zerolog.Level, error) { s = strings.ToLower(s) switch strings.TrimSpace(s) { + case "trace": + l = zerolog.TraceLevel case "debug": l = zerolog.DebugLevel case "info": @@ -67,7 +70,7 @@ func strToLevel(s string) (zerolog.Level, error) { case "error": l = zerolog.ErrorLevel default: - return l, fmt.Errorf("invalid log level; must be one of: debug, info, warning, error") + return l, fmt.Errorf("invalid log level; must be one of: trace, debug, info, warning, error") } return l, nil diff --git a/internal/pkg/config/testdata/bad-logging.yml b/internal/pkg/config/testdata/bad-logging.yml index 503d7ffcc..844901693 100644 --- a/internal/pkg/config/testdata/bad-logging.yml +++ b/internal/pkg/config/testdata/bad-logging.yml @@ -7,4 +7,4 @@ fleet: agent: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 logging: - level: trace + level: grace From c426aa81e85a2be3105bb6ee86cf4950bda14401 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 17 Mar 2021 12:11:35 -0400 Subject: [PATCH 026/240] Add API status endpoint (#131) (#138) * Add /api/status endpoint. * Run fmt. * Remove version from status. (cherry picked from commit 7cb2930c7cda0ebe50267ac2387acbdfe35e7727) --- cmd/fleet/handleStatus.go | 42 ++++++++++++++++++++++++++++ cmd/fleet/main.go | 2 +- cmd/fleet/router.go | 8 +++++- cmd/fleet/schema.go | 6 ++++ cmd/fleet/server_integration_test.go | 2 +- cmd/fleet/server_test.go | 2 +- internal/pkg/policy/self.go | 15 ++++++++++ 7 files changed, 73 insertions(+), 4 deletions(-) create mode 100644 cmd/fleet/handleStatus.go diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go new file mode 100644 index 000000000..86cf6d303 --- /dev/null +++ b/cmd/fleet/handleStatus.go @@ -0,0 +1,42 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog/log" +) + +func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { + status := rt.sm.Status() + resp := StatusResponse{ + Name: "fleet-server", + Status: status.String(), + } + + data, err := json.Marshal(&resp) + if err != nil { + code := http.StatusInternalServerError + log.Error().Err(err).Int("code", code).Msg("fail status") + http.Error(w, err.Error(), code) + return + } + + code := http.StatusServiceUnavailable + if status == proto.StateObserved_DEGRADED || status == proto.StateObserved_HEALTHY { + code = http.StatusOK + } + w.WriteHeader(code) + if _, err = w.Write(data); err != nil { + if err != context.Canceled { + log.Error().Err(err).Int("code", code).Msg("fail status") + } + } +} diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 56798ff09..c08ee358c 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -526,7 +526,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er if err != nil { return err } - router := NewRouter(bulker, ct, et) + router := NewRouter(bulker, ct, et, sm) g.Go(loggedRunFunc(ctx, "Http server", func(ctx context.Context) error { return runServer(ctx, router, &f.cfg.Inputs[0].Server) diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index d3cf5db35..fa5e53d70 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -6,10 +6,12 @@ package fleet import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/julienschmidt/httprouter" ) const ( + ROUTE_STATUS = "/api/status" ROUTE_ENROLL = "/api/fleet/agents/:id" ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" ROUTE_ACKS = "/api/fleet/agents/:id/acks" @@ -17,19 +19,23 @@ const ( type Router struct { bulker bulk.Bulk + ver string ct *CheckinT et *EnrollerT + sm policy.SelfMonitor } -func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT) *httprouter.Router { +func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, sm policy.SelfMonitor) *httprouter.Router { r := Router{ bulker: bulker, ct: ct, et: et, + sm: sm, } router := httprouter.New() + router.GET(ROUTE_STATUS, r.handleStatus) router.POST(ROUTE_ENROLL, r.handleEnroll) router.POST(ROUTE_CHECKIN, r.handleCheckin) router.POST(ROUTE_ACKS, r.handleAcks) diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 9718a0748..9b8b9cdb2 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -139,3 +139,9 @@ type Event struct { Data json.RawMessage `json:"data,omitempty"` Error string `json:"error,omitempty"` } + +type StatusResponse struct { + Name string `json:"name"` + Version string `json:"version"` + Status string `json:"status"` +} diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index e36527910..639e077b8 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -105,7 +105,7 @@ func (s *tserver) waitServerUp(ctx context.Context, dur time.Duration) error { start := time.Now() cli := cleanhttp.DefaultClient() for { - res, err := cli.Get(s.baseUrl()) + res, err := cli.Get(s.baseUrl() + "/api/status") if err != nil { if time.Since(start) > dur { return err diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 38a905548..d015b77b8 100644 --- a/cmd/fleet/server_test.go +++ 
b/cmd/fleet/server_test.go @@ -43,7 +43,7 @@ func TestRunServer(t *testing.T) { et, err := NewEnrollerT(cfg, nil, c) require.NoError(t, err) - router := NewRouter(bulker, ct, et) + router := NewRouter(bulker, ct, et, nil) errCh := make(chan error) var wg sync.WaitGroup diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go index 64aa8c7a6..ac6b52621 100644 --- a/internal/pkg/policy/self.go +++ b/internal/pkg/policy/self.go @@ -28,6 +28,8 @@ import ( type SelfMonitor interface { // Run runs the monitor. Run(ctx context.Context) error + // Status gets current status of monitor. + Status() proto.StateObserved_Status } type selfMonitorT struct { @@ -39,6 +41,7 @@ type selfMonitorT struct { monitor monitor.Monitor policyId string + status proto.StateObserved_Status reporter status.Reporter policy *model.Policy @@ -58,6 +61,7 @@ func NewSelfMonitor(fleet config.Fleet, bulker bulk.Bulk, monitor monitor.Monito bulker: bulker, monitor: monitor, policyId: policyId, + status: proto.StateObserved_STARTING, reporter: reporter, policyF: dl.QueryLatestPolicies, policiesIndex: dl.FleetPolicies, @@ -96,6 +100,12 @@ LOOP: return nil } +func (m *selfMonitorT) Status() proto.StateObserved_Status { + m.mut.Lock() + defer m.mut.Unlock() + return m.status +} + func (m *selfMonitorT) process(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { @@ -151,8 +161,12 @@ func (m *selfMonitorT) groupByLatest(policies []model.Policy) map[string]model.P } func (m *selfMonitorT) updateStatus() error { + m.mut.Lock() + defer m.mut.Unlock() + if m.policy == nil { // no policy found + m.status = proto.StateObserved_STARTING if m.policyId == "" { m.reporter.Status(proto.StateObserved_STARTING, "Waiting on default policy with Fleet Server integration", nil) } else { @@ -176,6 +190,7 @@ func (m *selfMonitorT) updateStatus() error { status = proto.StateObserved_DEGRADED extendMsg = "; missing config fleet.agent.id" } + m.status = status if m.policyId == "" { m.reporter.Status(status, fmt.Sprintf("Running on default policy with Fleet Server integration%s", extendMsg), nil) } else { From e0512ec49bb263a8d35b9c4959c62e51e9aa4090 Mon Sep 17 00:00:00 2001 From: Nicolas Ruflin Date: Thu, 18 Mar 2021 13:14:45 +0100 Subject: [PATCH 027/240] Cleanup and refactoring (#133) (#139) I was going through the code base to get more familiar with it. Along the way I made some small changes / cleanups that I stumbled over. There are also parts where I added small tests which are more for me to understand the code on what it does. Here is the list: * Remove savedObjectKey function as it is not needed anymore * add config options for monitoring to the default configuration for documentation purpose * move the creation of the nowString to where it is actually used. Seems it is used only once. * add test for convertActions to better understand the format on my end * Add note to missing parts * update t.Fatal should not be used in test go routine * add basic token test * add more entries to the config file about available config options * remove unused env package * remove not used `missing.txt` file. 
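Editorial aside, not part of the patch series: PATCH 026 above exposes GET /api/status, which returns a StatusResponse body and maps the self-monitor state to an HTTP code (200 for HEALTHY or DEGRADED, 503 otherwise). A minimal, hypothetical client sketch follows; the bind address is an assumption and does not come from either patch.

package main

import (
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// The address is illustrative; fleet-server's listener comes from its input config.
	resp, err := http.Get("http://127.0.0.1:8220/api/status")
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the fields this sketch cares about from StatusResponse.
	var st struct {
		Name   string `json:"name"`
		Status string `json:"status"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&st); err != nil {
		panic(err)
	}

	// 200 means HEALTHY or DEGRADED; 503 means the server is not ready yet.
	fmt.Printf("%s is %s (HTTP %d)\n", st.Name, st.Status, resp.StatusCode)
}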
* remove dsl readme and filed https://github.com/elastic/fleet-server/issues/136 instead (cherry picked from commit fb3e183e8188c240b4c21c43bf5bbb676f6be117) --- cmd/fleet/handleChecking_test.go | 33 +++ cmd/fleet/handleEnroll.go | 4 +- cmd/fleet/main.go | 11 +- cmd/fleet/missing.txt | 6 - fleet-server.yml | 17 ++ go.sum | 308 +++++++++++++++++++++++++++- internal/pkg/apikey/apikey_test.go | 20 ++ internal/pkg/apikey/invalidate.go | 2 +- internal/pkg/coordinator/v0_test.go | 3 +- internal/pkg/dsl/readme.txt | 6 - internal/pkg/env/env.go | 17 -- 11 files changed, 374 insertions(+), 53 deletions(-) create mode 100644 cmd/fleet/handleChecking_test.go delete mode 100644 cmd/fleet/missing.txt create mode 100644 internal/pkg/apikey/apikey_test.go delete mode 100644 internal/pkg/dsl/readme.txt delete mode 100644 internal/pkg/env/env.go diff --git a/cmd/fleet/handleChecking_test.go b/cmd/fleet/handleChecking_test.go new file mode 100644 index 000000000..046ad0cee --- /dev/null +++ b/cmd/fleet/handleChecking_test.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/stretchr/testify/assert" + "testing" +) + +func TestConvertActionsEmpty(t *testing.T) { + resp, token := convertActions("1234", nil) + assert.Equal(t, resp, []ActionResp{}) + assert.Equal(t, token, "") +} + +func TestConvertActions(t *testing.T) { + actions := []model.Action{ + { + ActionId: "1234", + }, + } + resp, token := convertActions("agent-id", actions) + assert.Equal(t, resp, []ActionResp{ + { + AgentId: "agent-id", + Id: "1234", + }, + }) + assert.Equal(t, token, "") +} diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 8117909be..3b154c4c2 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -160,7 +160,6 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } now := time.Now() - nowStr := now.UTC().Format(time.RFC3339) // Generate an ID here so we can pre-create the api key and avoid a round trip u, err := uuid.NewV4() @@ -201,7 +200,7 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq Active: true, PolicyId: erec.PolicyId, Type: req.Type, - EnrolledAt: nowStr, + EnrolledAt: now.UTC().Format(time.RFC3339), LocalMetadata: localMeta, AccessApiKeyId: accessApiKey.Id, DefaultApiKeyId: defaultOutputApiKey.Id, @@ -347,6 +346,7 @@ func decodeEnrollRequest(data io.Reader) (*EnrollRequest, error) { // Validate switch req.Type { + // TODO: Should these be converted to constant? Need to be kept in sync with Kibana? 
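// Editorial aside, not part of the patch series: one hypothetical shape for
// the TODO above would be to hoist the accepted enroll types into named
// constants that can be kept in sync with Kibana, e.g.
//
//	const (
//		EnrollEphemeral = "EPHEMERAL"
//		EnrollPermanent = "PERMANENT"
//		EnrollTemporary = "TEMPORARY"
//	)
//
// and to match on those names in the switch below instead of string literals.
// The constant names here are invented for illustration only.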
case "EPHEMERAL", "PERMANENT", "TEMPORARY": default: return nil, ErrUnknownEnrollType diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index c08ee358c..ff7b128dc 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -21,7 +21,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/coordinator" "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/env" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -43,15 +42,6 @@ const ( kAgentMode = "agent-mode" ) -func savedObjectKey() string { - key := env.GetStr( - "ES_SAVED_KEY", - "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", - ) - log.Debug().Str("key", key).Msg("saved objects") - return key -} - func installSignalHandler() context.Context { rootCtx := context.Background() return signal.HandleInterrupt(rootCtx) @@ -460,6 +450,7 @@ func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err error) { + // The metricsServer is only enabled if http.enabled is set in the config metricsServer, err := f.initMetrics(ctx, cfg) switch { case err != nil: diff --git a/cmd/fleet/missing.txt b/cmd/fleet/missing.txt deleted file mode 100644 index 7215c197e..000000000 --- a/cmd/fleet/missing.txt +++ /dev/null @@ -1,6 +0,0 @@ -Missing stuff - -- handle upgrade and unenroll events -- audit logging on saved objects etc. -- runs as admin; doesn't drop creds. -- stats diff --git a/fleet-server.yml b/fleet-server.yml index c0fa2c593..f27763092 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -10,5 +10,22 @@ fleet: logging: level: '${LOG_LEVEL:INFO}' +# Input config provided by the Elastic Agent for the server +#inputs: +# - type: +# policy: +# server: + logging: to_stderr: true + #to_files: + #files: + #level: + +# Enables the stats endpoint under http://localhost:5601 by default. 
+# Additional stats can be found under http://127.0.0.1:5066/stats and http://127.0.0.1:5066/state +http.enabled: true +#http.host: http://127.0.0.1 +#http.port: 5601 +#http.named_pipe.user: +#http.named_pipe.security_descriptor: \ No newline at end of file diff --git a/go.sum b/go.sum index 35d4032ec..d98c929dc 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,6 @@ +4d63.com/embedfiles v0.0.0-20190311033909-995e0740726f h1:oyYjGRBNq1TxAIG8aHqtxlvqUfzdZf+MbcRb/oweNfY= 4d63.com/embedfiles v0.0.0-20190311033909-995e0740726f/go.mod h1:HxEsUxoVZyRxsZML/S6e2xAuieFMlGO0756ncWx1aXE= +4d63.com/tz v1.1.1-0.20191124060701-6d37baae851b h1:+TO4EgK74+Qo/ilRDiF2WpY09Jk9VSJSLe3wEn+dJBw= 4d63.com/tz v1.1.1-0.20191124060701-6d37baae851b/go.mod h1:SHGqVdL7hd2ZaX2T9uEiOZ/OFAUfCCLURdLPJsd8ZNs= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= @@ -7,165 +9,269 @@ cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6A cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.51.0 h1:PvKAVQWCtlGUSlZkGW3QLelKaWq7KYv/MW1EboG8bfM= cloud.google.com/go v0.51.0/go.mod h1:hWtGJ6gnXH+KgDv+V0zFGDvpi07n3z8ZNj3T1RW0Gcw= +cloud.google.com/go/bigquery v1.0.1 h1:hL+ycaJpVE9M7nLoiXb/Pn10ENE2u+oddxbD8uu0ZVU= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/datastore v1.0.0 h1:Kt+gOPPp2LEPWp8CSfxhsM8ik9CcyE/gYu+0r+RnZvM= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/pubsub v1.0.1 h1:W9tAK3E57P75u0XLLR82LZyw8VpAnhmyTOxW9qzmyj8= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/storage v1.0.0 h1:VV2nUM3wwLLGh9lSABFgZMjInyUbJeaRSE64WuAIQ+4= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee h1:iAAPf9s7/+BIiGf+RjgcXLm3NoZaLIJsBXJuUa63Lx8= code.cloudfoundry.org/go-diodes v0.0.0-20190809170250-f77fb823c7ee/go.mod h1:Jzi+ccHgo/V/PLQUaQ6hnZcC1c4BS790gx21LRRui4g= +code.cloudfoundry.org/go-loggregator v7.4.0+incompatible h1:KqZYloMQWM5Zg/BQKunOIA4OODh7djZbk48qqbowNFI= code.cloudfoundry.org/go-loggregator v7.4.0+incompatible/go.mod h1:KPBTRqj+y738Nhf1+g4JHFaBU8j7dedirR5ETNHvMXU= +code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f h1:UrKzEwTgeiff9vxdrfdqxibzpWjxLnuXDI5m6z3GJAk= code.cloudfoundry.org/gofileutils v0.0.0-20170111115228-4d0c80011a0f/go.mod h1:sk5LnIjB/nIEU7yP5sDQExVm62wu0pBh3yrElngUisI= +code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a h1:8rqv2w8xEceNwckcF5ONeRt0qBHlh5bnNfFnYTrZbxs= code.cloudfoundry.org/rfc5424 v0.0.0-20180905210152-236a6d29298a/go.mod h1:tkZo8GtzBjySJ7USvxm4E36lNQw1D3xM6oKHGqdaAJ4= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9 h1:VpgP7xuJadIUuKccphEpTJnWhS2jkQyMt6Y7pJCD7fY= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +github.com/Azure/azure-amqp-common-go/v3 v3.0.0 h1:j9tjcwhypb/jek3raNrwlCIl7iKQYOug7CLpSyBBodc= github.com/Azure/azure-amqp-common-go/v3 v3.0.0/go.mod h1:SY08giD/XbhTz07tJdpw1SoxQXHPN30+DI3Z04SYqyg= 
+github.com/Azure/azure-event-hubs-go/v3 v3.1.2 h1:S/NjCZ1Z2R4rHJd2Hbbad6rIhxJ4lZZebKTsKHweX4A= github.com/Azure/azure-event-hubs-go/v3 v3.1.2/go.mod h1:hR40byNJjKkS74+3RhloPQ8sJ8zFQeJ920Uk3oYY0+k= github.com/Azure/azure-pipeline-go v0.1.8/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= github.com/Azure/azure-pipeline-go v0.1.9/go.mod h1:XA1kFWRVhSK+KNFiOhfv83Fv8L9achrP7OxIzeTn1Yg= +github.com/Azure/azure-pipeline-go v0.2.1 h1:OLBdZJ3yvOn2MezlWvbrBMTEUQC72zAftRZOMdj5HYo= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= +github.com/Azure/azure-sdk-for-go v37.1.0+incompatible h1:aFlw3lP7ZHQi4m1kWCpcwYtczhDkGhDoRaMTaxcOf68= github.com/Azure/azure-sdk-for-go v37.1.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= github.com/Azure/azure-storage-blob-go v0.6.0/go.mod h1:oGfmITT1V6x//CswqY2gtAHND+xIP64/qL7a5QJix0Y= +github.com/Azure/azure-storage-blob-go v0.8.0 h1:53qhf0Oxa0nOjgbDeeYPUeyiNmafAFEY95rZLK0Tj6o= github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= +github.com/Azure/go-amqp v0.12.6 h1:34yItuwhA/nusvq2sPSNPQxZLCf/CtaogYH8n578mnY= github.com/Azure/go-amqp v0.12.6/go.mod h1:qApuH6OFTSKZFmCOxccvAv5rLizBQf4v8pRmG138DPo= +github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78 h1:w+iIsaOQNcT7OZ575w+acHgRric5iCyQh+xv+KJ4HB8= github.com/Azure/go-ansiterm v0.0.0-20170929234023-d6e3b3328b78/go.mod h1:LmzpDX56iTiv29bbRTIsUNlaFfuhWRQBWjQdVyAevI8= github.com/Azure/go-autorest/autorest v0.9.0/go.mod h1:xyHB1BMZT0cuDHU7I0+g046+BFDTQ8rEZB0s4Yfa6bI= github.com/Azure/go-autorest/autorest v0.9.3/go.mod h1:GsRuLYvwzLjjjRoWEIyMUaYq8GNUx2nRB378IPt/1p0= +github.com/Azure/go-autorest/autorest v0.9.6 h1:5YWtOnckcudzIw8lPPBcWOnmIFWMtHci1ZWAZulMSx0= github.com/Azure/go-autorest/autorest v0.9.6/go.mod h1:/FALq9T/kS7b5J5qsQ+RSTUdAmGFqi0vUdVNNx8q630= github.com/Azure/go-autorest/autorest/adal v0.5.0/go.mod h1:8Z9fGy2MpX0PvDjB1pEgQTmVqjGhiHBW7RJJEciWzS0= github.com/Azure/go-autorest/autorest/adal v0.8.0/go.mod h1:Z6vX6WXXuyieHAXwMj0S6HY6e6wcHn37qQMBQlvY3lc= github.com/Azure/go-autorest/autorest/adal v0.8.1/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/adal v0.8.2 h1:O1X4oexUxnZCaEUGsvMnr8ZGj8HI37tNezwY4npRqA0= github.com/Azure/go-autorest/autorest/adal v0.8.2/go.mod h1:ZjhuQClTqx435SRJ2iMlOxPYt3d2C/T/7TiQCVZSn3Q= +github.com/Azure/go-autorest/autorest/azure/auth v0.4.2 h1:iM6UAvjR97ZIeR93qTcwpKNMpV+/FTWjwEbuPD495Tk= github.com/Azure/go-autorest/autorest/azure/auth v0.4.2/go.mod h1:90gmfKdlmKgfjUpnCEpOJzsUEjrWDSLwHIG73tSXddM= +github.com/Azure/go-autorest/autorest/azure/cli v0.3.1 h1:LXl088ZQlP0SBppGFsRZonW6hSvwgL5gRByMbvUbx8U= github.com/Azure/go-autorest/autorest/azure/cli v0.3.1/go.mod h1:ZG5p860J94/0kI9mNJVoIoLgXcirM2gF5i2kWloofxw= github.com/Azure/go-autorest/autorest/date v0.1.0/go.mod h1:plvfp3oPSKwf2DNjlBjWF/7vwR+cUD/ELuzDCXwHUVA= +github.com/Azure/go-autorest/autorest/date v0.2.0 h1:yW+Zlqf26583pE43KhfnhFcdmSWlm5Ew6bxipnr/tbM= github.com/Azure/go-autorest/autorest/date v0.2.0/go.mod h1:vcORJHLJEh643/Ioh9+vPmf1Ij9AEBM5FuBIXLmIy0g= github.com/Azure/go-autorest/autorest/mocks v0.1.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= github.com/Azure/go-autorest/autorest/mocks v0.2.0/go.mod h1:OTyCOPRA2IgIlWxVYxBee2F5Gr4kF2zd2J5cFRaIDN0= +github.com/Azure/go-autorest/autorest/mocks v0.3.0 
h1:qJumjCaCudz+OcqE9/XtEPfvtOjOmKaui4EOpFI6zZc= github.com/Azure/go-autorest/autorest/mocks v0.3.0/go.mod h1:a8FDP3DYzQ4RYfVAxAN3SVSiiO77gL2j2ronKKP0syM= +github.com/Azure/go-autorest/autorest/to v0.3.0 h1:zebkZaadz7+wIQYgC7GXaz3Wb28yKYfVkkBKwc38VF8= github.com/Azure/go-autorest/autorest/to v0.3.0/go.mod h1:MgwOyqaIuKdG4TL/2ywSsIWKAfJfgHDo8ObuUk3t5sA= +github.com/Azure/go-autorest/autorest/validation v0.2.0 h1:15vMO4y76dehZSq7pAaOLQxC6dZYsSrj2GQpflyM/L4= github.com/Azure/go-autorest/autorest/validation v0.2.0/go.mod h1:3EEqHnBxQGHXRYq3HT1WyXAvT7LLY3tl70hw6tQIbjI= +github.com/Azure/go-autorest/logger v0.1.0 h1:ruG4BSDXONFRrZZJ2GUXDiUyVpayPmb1GnWeHDdaNKY= github.com/Azure/go-autorest/logger v0.1.0/go.mod h1:oExouG+K6PryycPJfVSxi/koC6LSNgds39diKLz7Vrc= +github.com/Azure/go-autorest/tracing v0.5.0 h1:TRn4WjSnkcSy5AEG3pnbtFSwNtwzjr4VYyQflFE619k= github.com/Azure/go-autorest/tracing v0.5.0/go.mod h1:r/s2XiOKccPW3HrqB+W0TQzfbtp2fGCgRFtBroKn4Dk= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802 h1:1BDTz0u9nC3//pOCMdNH+CiXJVYJh5UQNCOBG7jbELc= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DataDog/zstd v1.4.1 h1:3oxKN3wbHibqx897utPC2LTQU4J+IHWWJO+glkAkpFM= github.com/DataDog/zstd v1.4.1/go.mod h1:1jcaCB/ufaK+sKp1NBhlGmpz41jOoPQ35bpF36t7BBo= +github.com/Masterminds/semver v1.4.2 h1:WBLTQ37jOCzSLtXNdoo8bNM8876KhNqOKvrlGITgsTc= github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5 h1:ygIc8M6trr62pF5DucadTWGdEB4mEyvzi0e2nbcmcyA= github.com/Microsoft/go-winio v0.4.15-0.20190919025122-fc70bd9a86b5/go.mod h1:tTuCMEN+UleMWgg9dVx4Hu52b1bJo+59jBh3ajtinzw= +github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/Shopify/toxiproxy v2.1.4+incompatible h1:TKdv8HiTLgE5wdJuEML90aBgNWsokNbMijUGhmcoBJc= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6 h1:2Gl9Tray0NEjP9KC0FjdGWlszbmTIsBP3JYzgyFdL4E= github.com/StackExchange/wmi v0.0.0-20170221213301-9f32b5905fd6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg= +github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc h1:9iW/Fbn/R/nyUOiqo6AgwBe8uirqUIoTGF3vKG8qjoc= github.com/aerospike/aerospike-client-go v1.27.1-0.20170612174108-0f3b54da6bdc/go.mod h1:zj8LBEnWBDOVEIJt8LvaRvDG5ARAoa5dBeHaB472NRc= 
+github.com/akavel/rsrc v0.8.0 h1:zjWn7ukO9Kc5Q62DOJCcxGpXC18RawVtYAGdz2aLlfw= github.com/akavel/rsrc v0.8.0/go.mod h1:uLoCtb9J+EyAqh+26kdrTgmzRBFPGOolLWKpdxkKq+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f h1:wr9LrxkE1Ai416C/mis1gEDsXrbERHGufCmf7xuYwI4= github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f/go.mod h1:lvlu2Ij1bLmxB8RUWyw5IQ4/JcLX60eYhLiBmvImnhk= +github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI= github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= +github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43/go.mod h1:tJPYQG4mnMeUtQvQKNkbsFrnmZOg59Qnf8CcctFv5v4= +github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6 h1:uZuxRZCz65cG1o6K/xUqImNcYKtmk9ylqaH0itMSvzA= github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= +github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d h1:OE3kzLBpy7pOJEzE55j9sdgrSilUPzzj++FWvp1cmIs= github.com/antlr/antlr4 v0.0.0-20200820155224-be881fa6b91d/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77 h1:afT88tB6u9JCKQZVAAaa9ICz/uGn5Uw9ekn6P22mYKM= github.com/apoydence/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:bXvGk6IkT1Agy7qzJ+DjIw/SJ1AaB3AvAuMDVV+Vkoo= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 h1:G1bPvciwNyF7IUmKXNt9Ak3m6u9DE1rF+RmtIkBpVdA= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-radix v1.0.0 h1:F4z6KzEeeQIMeLFa97iZU6vupzoecKdU5TX24SNppXI= github.com/armon/go-radix v1.0.0/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= +github.com/aws/aws-lambda-go v1.6.0 h1:T+u/g79zPKw1oJM7xYhvpq7i4Sjc0iVsXZUaqRVVSOg= github.com/aws/aws-lambda-go v1.6.0/go.mod h1:zUsUQhAUjYzR8AuduJPCfhBuKWUaDbQiPOG+ouzmE1A= +github.com/aws/aws-sdk-go-v2 v0.9.0 h1:dWtJKGRFv3UZkMBQaIzMsF0/y4ge3iQPWTzeC4r/vl4= github.com/aws/aws-sdk-go-v2 v0.9.0/go.mod h1:sa1GePZ/LfBGI4dSq30f6uR4Tthll8axxtEPvlpXZ8U= +github.com/awslabs/goformation/v3 v3.1.0 h1:1WhWJrMtuwphJ+x1+0wM7v4QPDzcArvX+i4/sK1Z4e4= github.com/awslabs/goformation/v3 v3.1.0/go.mod h1:hQ5RXo3GNm2laHWKizDzU5DsDy+yNcenSca2UxN0850= +github.com/awslabs/goformation/v4 v4.1.0 
h1:JRxIW0IjhYpYDrIZOTJGMu2azXKI+OK5dP56ubpywGU= github.com/awslabs/goformation/v4 v4.1.0/go.mod h1:MBDN7u1lMNDoehbFuO4uPvgwPeolTMA2TzX1yO6KlxI= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2 h1:oMCHnXa6CCCafdPDbMh/lWRhRByN0VFLvv+g+ayx1SI= github.com/blakesmith/ar v0.0.0-20150311145944-8bd4349a67f2/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= +github.com/bradleyfalzon/ghinstallation v1.1.0 h1:mwazVinJU0mPyLxIcdtJzu4DhWXFO5lMsWhKyFRIwFk= github.com/bradleyfalzon/ghinstallation v1.1.0/go.mod h1:p7iD8KytOOKg2wCqbwvJlq4JGpYMjwjkiqdyUqOIHLI= +github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible h1:4g18+HnTDwEtO0n7K8B1Kjq+04MEKJRkhJNQ/hb9d5A= github.com/bsm/sarama-cluster v2.1.14-0.20180625083203-7e67d87a6b3f+incompatible/go.mod h1:r7ao+4tTNXvWm+VRpRJchr2kQhqxgmAp2iEX5W96gMM= +github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e h1:YYUjy5BRwO5zPtfk+aa2gw255FIIoi93zMmuy19o0bc= github.com/cavaliercoder/badio v0.0.0-20160213150051-ce5280129e9e/go.mod h1:V284PjgVwSk4ETmz84rpu9ehpGg7swlIH8npP9k2bGw= +github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e h1:Gbx+iVCXG/1m5WSnidDGuHgN+vbIwl+6fR092ANU+Y8= github.com/cavaliercoder/go-rpm v0.0.0-20190131055624-7a9c54e3d83e/go.mod h1:AZIh1CCnMrcVm6afFf96PBvE2MRpWFco91z8ObJtgDY= +github.com/census-instrumentation/opencensus-proto v0.2.1 h1:glEXhBS5PSLLv4IXzLA5yPRVX4bilULVyxxbrfOtDAk= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= +github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+qY= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e h1:fY5BOSpyZCqRo5OhCuC+XN+r/bBCmeuuJtjz+bCNIf8= github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4 h1:ta993UF76GwbvJcIo3Y68y/M3WxlpEHPWIGDkJYwzJI= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f h1:fK3ikA1s77arBhpDwFuyO0hUZ2Aa8O6o2Uzy8Q6iLbs= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190808214049-35bcce23fc5f/go.mod h1:RtIewdO+K/czvxvIFCMbPyx7jdxSLL1RZ+DA/Vk8Lwg= +github.com/cloudfoundry/noaa v2.1.0+incompatible h1:hr6VnM5VlYRN3YD+NmAedQLW8686sUMknOSe0mFS2vo= github.com/cloudfoundry/noaa v2.1.0+incompatible/go.mod 
h1:5LmacnptvxzrTvMfL9+EJhgkUfIgcwI61BVSTh47ECo= +github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4 h1:cWfya7mo/zbnwYVio6eWGsFJHqYw4/k/uhwIJ1eqRPI= github.com/cloudfoundry/sonde-go v0.0.0-20171206171820-b33733203bb4/go.mod h1:GS0pCHd7onIsewbw8Ue9qa9pZPv2V88cUZDttK6KzgI= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f h1:WBZRG4aNOuI15bLRrCgN8fCq8E5Xuty6jGbmSNEvSsU= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0 h1:sDMmm+q/3+BukdIpxwO365v/Rbspp2Nt5XntgQRXq8Q= github.com/codegangsta/inject v0.0.0-20150114235600-33e0aa1cb7c0/go.mod h1:4Zcjuz89kmFXt9morQgcfYZAYZ5n8WHjt81YYWIwtTM= +github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c h1:KFbqHhDeaHM7IfFtXHfUHMDaUStpM2YwBR+iJCIOsKk= github.com/containerd/fifo v0.0.0-20190816180239-bda0ff6ed73c/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI= +github.com/coreos/etcd v3.3.10+incompatible h1:jFneRYjIvLMLhDLCzuTuU4rSJUjRplcJQ7pD7MnhC04= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible h1:bXhRBIXoTm9BYHS3gE0TtQuyNZyeEMux2sDi4oo5YOo= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0 h1:3Jm3tLmsgAYcjC+4Up7hJrFBPr+n7rAqYeSw/SZazuY= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e h1:Wf6HqHfScWJN9/ZjdUKyjop4mf3Qdd+1TvvltAvM3m8= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd/v22 v22.0.0 h1:XJIw/+VlJ+87J+doOxznsAWIdmWuViOVhkQamW5YV28= github.com/coreos/go-systemd/v22 v22.0.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= +github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea h1:n2Ltr3SrfQlf/9nOna1DoGKxLx3qTSI8Ttl6Xrqp6mw= github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= +github.com/cpuguy83/go-md2man v1.0.10 h1:BSKMNlYxDvnunlTymqtgONjNnaRV1sTpcovwwjF22jk= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cucumber/godog v0.8.1 h1:lVb+X41I4YDreE+ibZ50bdXmySxgRviYFgKY6Aw4XE8= github.com/cucumber/godog v0.8.1/go.mod h1:vSh3r/lM+psC1BPXvdkSEuNjmXfpVqrMGYAElF6hxnA= github.com/cyphar/filepath-securejoin v0.2.2 h1:jCwT2GTP+PY5nBz3c/YL5PAIbusElVrPujOBSCj8xRg= github.com/cyphar/filepath-securejoin v0.2.2/go.mod h1:FpkQEhXnPnOthhzymB7CGsFk2G9VLXONKD9G7QGMM+4= -github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892 h1:qg9VbHo1TlL0KDM0vYvBG9EY0X0Yku5WYIPoFWt8f6o= github.com/davecgh/go-xdr v0.0.0-20161123171359-e6a2ba005892/go.mod h1:CTDl0pzVzE5DEzZhPfvhY/9sPFMQIxaJ9VAMs9AagrE= +github.com/denisenkom/go-mssqldb v0.0.0-20200206145737-bbfc9a55622e h1:LzwWXEScfcTu7vUZNlDDWDARoSGEtvlDKK2BYHowNeE= github.com/denisenkom/go-mssqldb 
v0.0.0-20200206145737-bbfc9a55622e/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2 h1:6+hM8KeYKV0Z9EIINNqIEDyyIRAcNc2FW+/TUYNmWyw= github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= +github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b h1:mUDs72Rlzv6A4YN8w3Ra3hU9x/plOQPcQjZYL/1f5SM= github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible h1:4jGdduO4ceTJFKf0IhgaB8NJapGqKHwC2b4xQ/cXujM= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2 h1:tdlZCpZ/P9DhczCTSixgIKmwPv6+wP5DGjqLYw5SUiA= github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= +github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1 h1:eG5K5GNAAHvQlFmfIuy0Ocjg5dvyX22g/KknwTpmBko= github.com/digitalocean/go-libvirt v0.0.0-20180301200012-6075ea3c39a1/go.mod h1:PRcPVAAma6zcLpFd4GZrjR/MRpood3TamjKI2m/z/Uw= +github.com/dimchansky/utfbom v1.1.0 h1:FcM3g+nofKgUteL8dm/UpdRXNC9KmADgTpLKsu0TRo4= github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= +github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf h1:uOWCk+L8abzw0BzmnCn7j7VT3g6bv9zW8fkR0yOP0Q4= github.com/dlclark/regexp2 v1.1.7-0.20171009020623-7632a260cbaf/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a h1:pNE/kl/UUSqAi7IiyPjnaIbYBRaEORJY8/RCK9Tx39c= github.com/docker/docker v1.4.2-0.20170802015333-8af4db6f002a/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ= github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec= +github.com/docker/go-metrics v0.0.1 h1:AgB/0SvBxihN0X8OR4SjsblXkbMvalQ8cjmtKQ2rQV8= github.com/docker/go-metrics v0.0.1/go.mod h1:cG1hvH2utMXtqgqqYE9plW6lDxS3/5ayHzueweSI3Vw= +github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8 h1:9Hsno4vmXpQ0yVAp07bLxS5dHH24w80xzmUCLil47ME= github.com/docker/go-plugins-helpers v0.0.0-20181025120712-1e6269c305b8/go.mod h1:LFyLie6XcDbyKGeVK6bHe+9aJTYCxWLBg5IrJZOaXKA= +github.com/docker/go-units v0.4.0 h1:3uh0PgVws3nIA0Q+MwDC8yjEPf9zjRfZZWXZYDct3Tw= github.com/docker/go-units v0.4.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96 h1:cenwrSVm+Z7QLSV/BsnenAOcDXdX4cMv4wP0B/5QbPg= github.com/docker/spdystream v0.0.0-20160310174837-449fdfce4d96/go.mod h1:Qh8CwZgvJUkLughtfhJv5dyTYa91l1fOUCrgjqmcifM= 
+github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815 h1:bWDMxwH3px2JBh6AyO7hdCn/PkvCZXii8TGj7sbtEbQ= github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3ebgob9U8Nd0kOddGdZWjyMGR8Wziv+TBNwSE= +github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6 h1:RrkoB0pT3gnjXhL/t10BSP1mcr/0Ldea2uMyuBr2SWk= github.com/dop251/goja_nodejs v0.0.0-20171011081505-adff31b136e6/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y= +github.com/dustin/go-humanize v1.0.0 h1:VSnTsYCnlFHaM2/igO1h6X3HA71jcobQuxemgkq4zYo= github.com/dustin/go-humanize v1.0.0/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= +github.com/eapache/go-resiliency v1.2.0 h1:v7g92e/KSN71Rq7vSThKaWIq68fL4YHvWyiUKorFR1Q= github.com/eapache/go-resiliency v1.2.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21 h1:YEetp8/yCZMuEPMUDHG0CW/brkkEp8mzqk2+ODEitlw= github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0 h1:YOEu7KNc61ntiQlcEeUIoDTJ2o8mQznoNvUhiigpIqc= github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2 h1:DW6WrARxK5J+o8uAKCiACi5wy9EK1UzrsCpGBPsKHAA= github.com/eclipse/paho.mqtt.golang v1.2.1-0.20200121105743-0d940dd29fd2/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts= github.com/elastic/beats/v7 v7.11.1 h1:eYJRKc/mA6rhQNujUV9lUADQ0S9SZvI5d782BnNvgFY= github.com/elastic/beats/v7 v7.11.1/go.mod h1:2gJ+JvWjTYuMA37chVSfsolz7Z2ca+gL39HpmSLO+z8= +github.com/elastic/ecs v1.6.0 h1:8NmgfnsjmKXh9hVsK3H2tZtfUptepNc3msJOAynhtmc= github.com/elastic/ecs v1.6.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a h1:2NHgf1RUw+f240lpTnLrCp1aBNvq2wDi0E1A423/S1k= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= +github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= +github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 h1:0ZrGQPGY7QCySD/14ht2UDggGKmqgLouMd5FFimcguA= github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= +github.com/elastic/go-libaudit/v2 v2.1.0 h1:yWSKoGaoWLGFPjqWrQ4gwtuM77pTk7K4CsPxXss8he4= github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= github.com/elastic/go-licenser v0.3.1/go.mod h1:D8eNQk70FOCVBl3smCGQt/lv7meBeQno2eI1S5apiHQ= +github.com/elastic/go-lookslike v0.3.0 h1:HDI/DQ65V85ZqM7D/sbxcK2wFFnh3+7iFvBk2v2FTHs= github.com/elastic/go-lookslike v0.3.0/go.mod h1:AhH+rdJux5RlVjs+6ej4jkvYyoNRkj2crxmqeHlj3hA= +github.com/elastic/go-lumber v0.1.0 h1:HUjpyg36v2HoKtXlEC53EJ3zDFiDRn65d7B8dBHNius= github.com/elastic/go-lumber v0.1.0/go.mod 
h1:8YvjMIRYypWuPvpxx7WoijBYdbB7XIh/9FqSYQZTtxQ= +github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595 h1:q8n4QjcLa4q39Q3fqHRknTBXBtegjriHFrB42YKgXGI= github.com/elastic/go-perf v0.0.0-20191212140718-9c656876f595/go.mod h1:s09U1b4P1ZxnKx2OsqY7KlHdCesqZWIhyq0Gs/QC/Us= +github.com/elastic/go-seccomp-bpf v1.1.0 h1:jUzzDc6LyCtdolZdvL/26dad6rZ9vsc7xZ2eadKECAU= github.com/elastic/go-seccomp-bpf v1.1.0/go.mod h1:l+89Vy5BzjVcaX8USZRMOwmwwDScE+vxCFzzvQwN7T8= +github.com/elastic/go-structform v0.0.7 h1:ihszOJQryNuIIHE2ZgsbiDq+agKO6V4yK0JYAI3tjzc= github.com/elastic/go-structform v0.0.7/go.mod h1:QrMyP3oM9Sjk92EVGLgRaL2lKt0Qx7ZNDRWDxB6khVs= github.com/elastic/go-sysinfo v1.1.1/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= github.com/elastic/go-sysinfo v1.3.0 h1:eb2XFGTMlSwG/yyU9Y8jVAYLIzU2sFzWXwo2gmetyrE= github.com/elastic/go-sysinfo v1.3.0/go.mod h1:i1ZYdU10oLNfRzq4vq62BEwD2fH8KaWh6eh0ikPT9F0= +github.com/elastic/go-txfile v0.0.7 h1:Yn28gclW7X0Qy09nSMSsx0uOAvAGMsp6XHydbiLVe2s= github.com/elastic/go-txfile v0.0.7/go.mod h1:H0nCoFae0a4ga57apgxFsgmRjevNCsEaT6g56JoeKAE= github.com/elastic/go-ucfg v0.7.0/go.mod h1:iaiY0NBIYeasNgycLyTvhJftQlQEUO2hpF+FX0JKxzo= github.com/elastic/go-ucfg v0.8.3 h1:leywnFjzr2QneZZWhE6uWd+QN/UpP0sdJRHYyuFvkeo= @@ -175,58 +281,92 @@ github.com/elastic/go-windows v1.0.1 h1:AlYZOldA+UJ0/2nBuqWdo90GFCgG9xuyw9SYzGUt github.com/elastic/go-windows v1.0.1/go.mod h1:FoVvqWSun28vaDQPbj2Elfc0JahhPB7WQEGa3c814Ss= github.com/elastic/gosigar v0.13.0 h1:EIeuQcLPKia759s6mlVztlxUyKiKYHo6y6kOODOLO7A= github.com/elastic/gosigar v0.13.0/go.mod h1:iXRIGg2tLnu7LBdpqzyQfGDEidKCfWcCMS0WKyPWoMs= +github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec h1:rAHd7DeHIHjSzvnkl197GKh9TCWGKg/z2BBbbGOEiWI= github.com/elastic/sarama v1.19.1-0.20200629123429-0e7b69039eec/go.mod h1:X690XXMxlbtN8c7xcpsENKNlbj8VClCZ2hwSOhSyNmE= +github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153 h1:yUdfgN0XgIJw7foRItutHYUIhlcKzcSf5vDpdhQAKTc= github.com/elazarl/goproxy v0.0.0-20180725130230-947c36da3153/go.mod h1:/Zj4wYkgs4iZTTu3o/KG3Itv/qCCa8VVMlb3i9OVuzc= +github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633 h1:H2pdYOb3KQ1/YsqVWoWNLQO+fusocsw354rqGTZtAgw= github.com/emicklei/go-restful v0.0.0-20170410110728-ff4f55a20633/go.mod h1:otzb+WCGbkyDHkqmQmT5YD2WR4BBwUdeQoFo8l/7tVs= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4 h1:rEvIZUSZ3fx39WIi3JkQqQBitGwpELBIYWeBVh6wn+E= github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0 h1:EQciDnbrYxy13PgWoY8AqoxGiPrpgBZ1R8UNe3ddc+A= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/evanphx/json-patch v4.9.0+incompatible h1:kLcOMZeuLAJvL2BPWLMIj5oaZQobrkAqrL+WFZwQses= github.com/evanphx/json-patch v4.9.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.5.0 h1:vBh+kQp8lg9XPr56u1CPrWjFXtdphMoGWVHr9/1c+A0= github.com/fatih/color v1.5.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/fortytw2/leaktest v1.3.0 h1:u8491cBMTQ8ft8aeV+adlcytMZylmA5nnwwkRZjI8vw= github.com/fortytw2/leaktest 
v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= +github.com/frankban/quicktest v1.7.2 h1:2QxQoC1TS09S7fhCPsrvqYdvP1H5M1P1ih5ABm3BTYk= github.com/frankban/quicktest v1.7.2/go.mod h1:jaStnuzAqU1AJdCO0l53JDCJrVDKcS03DbaAcR7Ks/o= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24 h1:nREVDi4H8mwnNqfxFU9NMzZrDCg8TXbEatMvHozxKwU= github.com/garyburd/redigo v1.0.1-0.20160525165706-b8dc90050f24/go.mod h1:NR3MbYisc3/PwhQ00EMzDiPmrwpPxAn5GI05/YaO1SY= github.com/ghodss/yaml v0.0.0-20150909031657-73d445a93680/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72 h1:b+9H1GAsx5RsjvDFLoS5zkNBzIQMuVKUYQDmxU3N5XE= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0 h1:wDJmvq38kDhkVxi50ni9ykkdUr1PKgqKOoi01fa0Mdk= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0 h1:MP4Eh7ZCb31lleYCFuwm0oe4/YGak+5l1vA2NOE80nA= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logr/logr v0.1.0/go.mod h1:ixOQHD9gLJUVQQ2ZOR7zLEifBX6tGkNJF4QyIY7sIas= +github.com/go-logr/logr v0.2.0 h1:QvGt2nLcHH0WK9orKa+ppBPAxREcH364nPUedEpK0TY= github.com/go-logr/logr v0.2.0/go.mod h1:z6/tIYblkpsD+a4lm/fGIIU9mZ+XfAiaFtq7xTgseGU= +github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab h1:xveKWz2iaueeTaUgdetzel+U7exyigDYBryyVfV/rZk= github.com/go-martini/martini v0.0.0-20170121215854-22fa46961aab/go.mod h1:/P9AEU963A2AYjv4d1V5eVL1CQbEJq6aCNHDDjibzu8= +github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647 h1:whypLownH338a3Ork2w9t0KUKtVxbXYySuz7V1YGsJo= github.com/go-ole/go-ole v1.2.5-0.20190920104607-14974a1cf647/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1 h1:wSt/4CYxs70xbATrGXhokKF1i0tZjENLOo1ioIO13zk= github.com/go-openapi/jsonpointer v0.0.0-20160704185906-46af16f9f7b1/go.mod h1:+35s3my2LFTysnkMfxsJBAMHj/DoqoB9knIWoYG/Vk0= +github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9 h1:tF+augKRWlWx0J0B7ZyyKSiTyV6E1zZe+7b3qQlcEf8= github.com/go-openapi/jsonreference v0.0.0-20160704190145-13c6e3589ad9/go.mod h1:W3Z9FmVs9qj+KR4zFKmDPGiLdk1D9Rlm7cyMvf57TTg= +github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501 h1:C1JKChikHGpXwT5UQDFaryIpDtyyGL/CR6C2kB7F1oc= github.com/go-openapi/spec v0.0.0-20160808142527-6aced65f8501/go.mod h1:J8+jY1nAiCcj+friV/PDoE1/3eeccG9LYBs0tYvLOWc= +github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87 h1:zP3nY8Tk2E6RTkqGYrarZXuzh+ffyLDljLxCy1iJw80= github.com/go-openapi/swag v0.0.0-20160704191624-1d0bd113de87/go.mod h1:DXUve3Dpr1UfpPtxFw+EFuQ41HhCWZfha5jSVRG7C7I= 
+github.com/go-sourcemap/sourcemap v2.1.2+incompatible h1:0b/xya7BKGhXuqFESKM4oIiRo9WOt2ebz7KxfreD6ug= github.com/go-sourcemap/sourcemap v2.1.2+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.4.1 h1:g24URVg0OFbNUTx9qqY1IRZ9D9z3iPyi5zKhQZpNwpA= github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/gobuffalo/here v0.6.0 h1:hYrd0a6gDmWxBM4TnrGw8mQg24iSVoIkHEk7FodQcBI= github.com/gobuffalo/here v0.6.0/go.mod h1:wAG085dHOYqUpf+Ap+WOdrPTp5IYcDAs/x7PLa8Y5fM= +github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be h1:zXHeEEJ231bTf/IXqvCfeaqjLpXsq42ybLoT4ROSR6Y= github.com/gocarina/gocsv v0.0.0-20170324095351-ffef3ffc77be/go.mod h1:/oj50ZdPq/cUjA02lMZhijk5kR31SEydKyqah1OgBuo= +github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e h1:BWhy2j3IXJhjCbC68FptL43tDKIq8FladmaTs3Xs7Z8= github.com/godbus/dbus v0.0.0-20190422162347-ade71ed3457e/go.mod h1:bBOAhwG1umN6/6ZUMtDFBMQR8jRg9O75tm9K00oMsK4= +github.com/godbus/dbus/v5 v5.0.3 h1:ZqHaoEF7TBzh4jzPmqVhE/5A1z9of6orkAe5uHoAeME= github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/godror/godror v0.10.4 h1:44FcfzDPp/PJZzen5Hm59SZQBhgrbR6E1KwCjg6gnJo= github.com/godror/godror v0.10.4/go.mod h1:9MVLtu25FBJBMHkPs0m3Ngf/VmwGcLpM2HS8PlNGw9U= github.com/gofrs/flock v0.7.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= +github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b h1:3QNh5Xo2pmr2nZXENtnztfpjej8XY8EPmvYxF5SzY9M= github.com/gofrs/flock v0.7.2-0.20190320160742-5135e617513b/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84= github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe h1:lXe2qZdvpiX5WZkZR4hgp4KJVfY3nMkvmwbVkpv1rVY= github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s= github.com/golang/mock 
v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -240,163 +380,247 @@ github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvq github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.2 h1:+Z5KGCizgyZCbGh1KZqA0fcLLkwbsjIzS4aV2v7wJX0= github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9 h1:b4EyQBj8pgtcWOr7YCSxK6NUQzJr0n4hxJ3mc+dtKk4= github.com/google/flatbuffers v1.7.2-0.20170925184458-7a6b2bf521e9/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0 h1:xsAVV57WRhGj6kEIi8ReJzQlHHqcBYCElAvkovg3B/4= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-github/v28 v28.1.1 h1:kORf5ekX5qwXO2mGzXXOjMe/g6ap8ahVe0sBEulhSxo= github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= +github.com/google/go-github/v29 v29.0.2 h1:opYN6Wc7DOz7Ku3Oh4l7prmkOMwEcQxpFtxdU8N8Pts= github.com/google/go-github/v29 v29.0.2/go.mod h1:CHKiKKPHJ0REzfwc14QMklvtHwCveD0PxlMjLlzAM5E= +github.com/google/go-querystring v1.0.0 h1:Xkwi/a1rcvNg1PPYe5vI8GbeBY/jrVuDX5ASuANWTrk= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gofuzz v1.1.0 h1:Hsa8mG0dQ46ij8Sl2AYJDUv1oA9/d6Vk+3LG99Oe02g= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34 h1:/wV+gZsAEt7vP+fJkT1AltOejfLS3uonB4RTOdXWjVk= github.com/google/gopacket v1.1.18-0.20191009163724-0ad7f2610e34/go.mod h1:UdDNZ1OO62aGYVnPhxT1U6aI7ukYtA/kB8vaU0diBUM= github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0 h1:OggOMmdI0JLwg1FkOKH9S7fVHF0oEm8PX6S8kAdpOps= github.com/google/licenseclassifier v0.0.0-20200402202327-879cb1424de0/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= +github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= 
+github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA= github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/subcommands v1.0.1 h1:/eqq+otEXm5vhfBrbREPCSVQbvofip6kIz+mX5TUH7k= github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f h1:XXzyYlFbxK3kWfcmu3Wc+Tv8/QQl/VqwsWuSYF1Rj0s= github.com/google/uuid v1.1.2-0.20190416172445-c2e93f3ae59f/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyycI+I= github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg= +github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= +github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a h1:yNuTIQkXLNAevCwQJ7ur3ZPoZPhbvAi6QXhJ/ylX6+8= github.com/gorhill/cronexpr v0.0.0-20161205141322-d520615e531a/go.mod h1:g2644b03hfBX9Ov0ZBDgXXens4rxSxmqFBbhvKv2yVA= +github.com/gorilla/mux v1.7.2 h1:zoNxOV7WjqXptQOVngLmcSQgXmgk4NMz1HibBchjl/I= github.com/gorilla/mux v1.7.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7 h1:pdN6V1QBWetyv/0+wjACpqVH+eVULgEjkurDLq3goeM= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/grpc-ecosystem/grpc-gateway v1.13.0 h1:sBDQoHXrOlfPobnKw69FIKa1wg9qsLLvvQ/Y19WtFgI= github.com/grpc-ecosystem/grpc-gateway v1.13.0/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= +github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5 h1:xI88renBpIJws9OfEQq4Dng10OppnY5u9bTok/GDFEI= github.com/h2non/filetype v1.1.1-0.20201130172452-f60988ab73d5/go.mod h1:319b3zT68BvV+WRj7cwy856M2ehB3HqNOt6sy1HndBY= github.com/hashicorp/errwrap v1.0.0 h1:hLrqtEDnRye3+sgx6z4qVLNuviH3MR5aQ0ykNJa/UYA= github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1 h1:dH3aiDG9Jvb5r5+bYHsikaOUIpcM0xvgMXVoDkXMzJM= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/hashicorp/go-hclog v0.9.2 h1:CG6TE5H9/JXsFWJCfoIVpKFIkFe6ysEuHirp4DxCsHI= github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-multierror v1.1.0 h1:B9UzwGQJehnUY1yNrnwREHc3fGbC2xefo8g4TbElacI= 
github.com/hashicorp/go-multierror v1.1.0/go.mod h1:spPvp8C1qA32ftKqdAHm4hHTbPw+vmowP0z+KUhOZdA= +github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP3V9oNE4hmsM= github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= +github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d h1:Ft6PtvobE9vwkCsuoNO5DZDbhKkKuktAlSsiOi1X5NA= github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01 h1:HiJF8Mek+I7PY0Bm+SuhkwaAZSZP83sw6rrTMrgZ0io= github.com/haya14busa/go-actions-toolkit v0.0.0-20200105081403-ca0307860f01/go.mod h1:1DWDZmeYf0LX30zscWb7K9rUMeirNeBMd5Dum+seUhc= +github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1 h1:biVg9rs1Vl8LAwrkjlssTaEn2csIl3LKoQVEJrWGmJ8= github.com/haya14busa/go-checkstyle v0.0.0-20170303121022-5e9d09f51fa1/go.mod h1:RsN5RGgVYeXpcXNtWyztD5VIe7VNSEqpJvF2iEH7QvI= +github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5 h1:ylgozezbuxA/i4uFtWCG/qGKYOZydsS8VUNNwfugn2Q= github.com/haya14busa/secretbox v0.0.0-20180525171038-07c7ecf409f5/go.mod h1:FGO/dXIFZnan7KvvUSFk1hYMnoVNzB6NTMPrmke8SSI= +github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95 h1:S4qyfL2sEm5Budr4KVMyEniCy+PbS55651I/a+Kn/NQ= github.com/hectane/go-acl v0.0.0-20190604041725-da78bae5fc95/go.mod h1:QiyDdbZLaJ/mZP4Zwc9g2QsfaEA4o7XvvgZegSci5/E= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/imdario/mergo v0.3.5/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2 h1:uJiWD+lXJ+WJ9kldTB6F4T4V+oGIhd0I1ktTXk3P6Ks= github.com/insomniacslk/dhcp v0.0.0-20180716145214-633285ba52b2/go.mod h1:CfMdguCK66I5DAUJgGKyNz8aB6vO5dZzkm9Xep6WGvw= github.com/jcmturner/gofork v1.0.0 h1:J7uCkflzTEhUZ64xqKnkDxq3kzc96ajM1Gli5ktUem8= github.com/jcmturner/gofork v1.0.0/go.mod 
h1:MK8+TM0La+2rjBD4jE12Kj1pCCxK7d2LK/UM3ncEo0o= +github.com/jessevdk/go-flags v1.4.0 h1:4IU2WS7AumrZ/40jfhf4QVDMsQwqA7VEHozFRrGARJA= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af h1:pmfjZENx5imkbgOkpRUYLnmbU7UEFbjtDA2hxJ1ichM= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= +github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5 h1:lrdPtrORjGv1HbbEvKWDUAy97mPpFm4B8hp77tcCUJY= github.com/jmoiron/sqlx v1.2.1-0.20190826204134-d7d95172beb5/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901 h1:rp+c0RAYOWj8l6qbCUTSiRLG/iKnW3K3/QfPPuSsBt4= github.com/joeshaw/multierror v0.0.0-20140124173710-69b34d4ec901/go.mod h1:Z86h9688Y0wesXCyonoVr47MasHilkuLMqGhRZ4Hpak= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9qUBdQ= github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= +github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd h1:KikNiFwUO3QLyeKyN4k9yBH9Pcu/gU/yficWi61cJIw= github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= +github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.10 h1:Kz6Cvnvv2wGdaG/V8yMvfkmNiXq9Ya2KUv4rouJJr68= github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U= github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/justinas/nosurf v1.1.0 h1:qqV6FJmnDBJ6F9pOzhZgZitAZWBYonMOXglof7TtdZw= github.com/justinas/nosurf v1.1.0/go.mod h1:ALpWdSbuNGy2lZWtyXdjkYv4edL23oSEgfBT1gPJ5BQ= +github.com/kardianos/service v1.1.0 h1:QV2SiEeWK42P0aEmGcsAgjApw/lRxkwopvT+Gu6t1/0= github.com/kardianos/service v1.1.0/go.mod h1:RrJI2xn5vve/r32U5suTbeaSGoMU6GbNPoj36CVYcHc= github.com/karrick/godirwalk v1.15.6 h1:Yf2mmR8TJy+8Fa0SuQVto5SYap6IF7lNVX4Jdl8G1qA= github.com/karrick/godirwalk v1.15.6/go.mod 
h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= +github.com/kisielk/errcheck v1.2.0 h1:reN85Pxc5larApoH1keMBiu2GWtPqXQ1nc9gx+jOU+E= github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= +github.com/kisielk/gotool v1.0.0 h1:AV2c/EiW3KqPNT9ZKl07ehoAGi4C5/01Cfbblndcapg= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/compress v1.9.8 h1:VMAMUUOh+gaxKTMk+zqbjsSjsIcUcL/LF4o63i82QyA= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2 h1:DB17ag19krx9CFsz4o3enTrPXyIXCl+2iCXH/aMAp9s= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515 h1:T+h1c/A9Gawja4Y9mFVWj2vyii2bbUNDw3kt9VxK2EY= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0 h1:s5hAObm+yFO5uHYt5dYjxi2rXrsnmRpJx4OYvIWUaQs= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= +github.com/kr/pty v1.1.1 h1:VkoXIwSboBpnk99O/KFauAEILuNHv5DVFKZMBN/gUgw= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01 h1:EPw7R3OAyxHBCyl0oqh3lUZqS5lu3KSxzzGasE0opXQ= github.com/lib/pq v1.1.2-0.20190507191818-2ff3cb3adc01/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/magefile/mage v1.9.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= github.com/magefile/mage v1.10.0 h1:3HiXzCUY12kh9bIuyXShaVe529fJfyqoVM42o/uom2g= github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= github.com/markbates/pkger v0.17.0 h1:RFfyBPufP2V6cddUyyEVSHBpaAnM1WzaMNyqomeT+iY= github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= +github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11/go.mod h1:Ah2dBMoxZEqk118as2T4u4fjfXarE0pPnMJaArZQZsI= github.com/mattn/go-colorable v0.0.8 h1:KatiXbcoFpoKmM5pL0yhug+tx/POfZO+0aVsuGhUhgo= github.com/mattn/go-colorable v0.0.8/go.mod 
h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= +github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe h1:YioO2TiJyAHWHyCRQCP8jk5IzTqmsbGc5qQPIhHo6xs= github.com/mattn/go-ieproxy v0.0.0-20191113090002-7c0f6868bffe/go.mod h1:pYabZ6IHcRpFh7vIaLfK7rdcWgFEb3SFJ6/gNWuh88E= github.com/mattn/go-isatty v0.0.2 h1:F+DnWktyadxnOrohKLNUC9/GjFii5RJgY4GFG6ilggw= github.com/mattn/go-isatty v0.0.2/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= +github.com/mattn/go-shellwords v1.0.7 h1:KqhVjVZomx2puPACkj9vrGFqnp42Htvo9SEAWePHKOs= github.com/mattn/go-shellwords v1.0.7/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-sqlite3 v1.9.0 h1:pDRiWfl+++eC2FEFRy6jXmQlvp4Yh3z1MJKg4UeYM/4= github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 h1:I0XW9+e1XWDxdcEniV4rQAIOPUGDq67JSCiRCgGCZLI= github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= github.com/mitchellh/gox v1.0.1/go.mod h1:ED6BioOGXMswlXa2zxfh/xdd5QhwYliBFn9V18Ap4z4= +github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51 h1:qdHlMllk/PTLUrX3XdtXDrLL1lPSfcqUmJD1eYfbapg= github.com/mitchellh/hashstructure v0.0.0-20170116052023-ab25296c0f51/go.mod h1:QjSHrPWS+BGUVBYkbTZWEnOh3G1DutKwClXU/ABz6AQ= +github.com/mitchellh/iochan v1.0.0 h1:C+X3KsSTLFVBr/tK1eYN/vs4rJcvsiLU338UhYPJWeY= github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d h1:7PxY7LVfSZm7PEeBTyK1rj1gABdCO2mbri6GKO1cMDs= github.com/munnerz/goautoneg v0.0.0-20120707110453-a547fc61f48d/go.mod 
h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223 h1:F9x/1yl3T2AeKLr2AMdilSD8+f9bvMnNN8VS5iDtovc= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= +github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/onsi/ginkgo v0.0.0-20170829012221-11459a886d9c/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.5.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.11.0 h1:JAKSXpt1YjtLA7YpPiqO9ss6sNXEsPfSGdwN0UHqzrw= github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.2.0/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0 h1:XPnZz8VVBHjVsy1vzJmRwIcSwiUO+JFfrv/xGiigmME= github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483 h1:eFd3FsB01m/zNg/yBMYdm/XqiqCztcN9SVRPtGtzDHo= github.com/opencontainers/go-digest v1.0.0-rc1.0.20190228220655-ac19fd6e7483/go.mod h1:cMLVZDEM3+U2I4VmLI6N8jQYUd2OVphdqWwCJHrFt2s= +github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6 h1:yN8BPXVwMBAm3Cuvh1L5XE8XpvYRMdsVLd82ILprhUU= github.com/opencontainers/image-spec v1.0.2-0.20190823105129-775207bd45b6/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= +github.com/otiai10/copy v1.2.0 h1:HvG945u96iNadPoG2/Ja2+AUJeW5YuFQMixq9yirC+k= github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= +github.com/otiai10/curr v1.0.0 h1:TJIWdbX0B+kpNagQrjgq8bCMrbhiuX73M2XwgtDMoOI= github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= +github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= +github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= github.com/peterbourgon/diskv v2.0.1+incompatible/go.mod h1:uqqh8zWWbv1HBMNONnaR/tNboyR3/BZd58JJSHlUSCU= +github.com/pierrec/lz4 v2.4.1+incompatible 
h1:mFe7ttWaflA46Mhqh+jUfjp2qTbPYxLB2/OyBppH9dg= github.com/pierrec/lz4 v2.4.1+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0 h1:i5VIxp6QB8oWZ8IkK8zrDgeT6ORGIUeiN+61iETwJbI= github.com/pierrre/gotestcover v0.0.0-20160517101806-924dca7d15f0/go.mod h1:4xpMLz7RBWyB+ElzHu8Llua96TRCB3YwX+l5EP1wmHk= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1-0.20170505043639-c605e284fe17/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= @@ -405,16 +629,20 @@ github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/poy/eachers v0.0.0-20181020210610-23942921fe77 h1:SNdqPRvRsVmYR0gKqFvrUKhFizPJ6yDiGQ++VAJIoDg= github.com/poy/eachers v0.0.0-20181020210610-23942921fe77/go.mod h1:x1vqpbcMW9T/KRcQ4b48diSiSVtYgvwQ5xzDByEg4WE= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g= +github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc h1:6B8wpniGN4FtqzqWhe2OBOGkeZFbhwZpCh+V/pv/oik= github.com/prometheus/client_golang v1.1.1-0.20190913103102-20428fa0bffc/go.mod h1:ikMPikHu8SMvBGWoKulvvOOZN227amf2E9eMYqyAwAY= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4 h1:gQz4mCbXsO+nc9n1hCxHcGA3Zx3Eo+UHZoInFGUIXNM= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190425082905-87a4384529e0/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= @@ -422,89 +650,130 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ= github.com/prometheus/procfs v0.0.11 h1:DhHlBtkHWPYi8O2y31JkK0TF+DGM+51OopZjH/Ia5qI= github.com/prometheus/procfs v0.0.11/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/prometheus v2.5.0+incompatible h1:7QPitgO2kOFG8ecuRn9O/4L9+10He72rVRJvMXrE9Hg= github.com/prometheus/prometheus v2.5.0+incompatible/go.mod h1:oAIUtOny2rjMX0OWN5vPR5/q/twIROJvdqnQKDdil/s= +github.com/rakyll/statik v0.1.6 
h1:uICcfUXpgqtw2VopbIncslhAmE5hwc4g20TEyEENBNs= github.com/rakyll/statik v0.1.6/go.mod h1:OEi9wJV/fMUAGx1eNjq75DKDsJVuEv1U0oYdX6GX8Zs= +github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563 h1:dY6ETXrvDG7Sa4vE8ZQG4yqWg6UnOcbqTAahkV813vQ= github.com/rcrowley/go-metrics v0.0.0-20190826022208-cac0b30c2563/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd h1:fvaEkjpr2NJbtnFRCft7D6y/mQ5/2OQU0pKJLW8dwFA= github.com/reviewdog/errorformat v0.0.0-20200109134752-8983be9bc7dd/go.mod h1:giYAXnpegRDPsXUO7TRpDKXJo1lFGYxyWRfEt5iQ+OA= +github.com/reviewdog/reviewdog v0.9.17 h1:MKb3rlQZgkEXr3d85iqtYNITXn7gDJr2kT0IhgX/X9A= github.com/reviewdog/reviewdog v0.9.17/go.mod h1:Y0yPFDTi9L5ohkoecJdgbvAhq+dUXp+zI7atqVibwKg= +github.com/rogpeppe/fastuuid v1.2.0 h1:Ppwyp6VYCF1nvBTXL3trRso7mXMlRrw9ooo375wvi2s= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= +github.com/rogpeppe/go-internal v1.3.0 h1:RR9dF3JtopPvtkroDZuVD7qquD0bnHlKSqaQhgwt8yk= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/xid v1.2.1 h1:mhH9Nq+C1fY2l1XIpgxIiUOfNpRBYH1kKcr+qfKgjRc= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/zerolog v1.19.0 h1:hYz4ZVdUgjXTBUmrkrw55j1nHx68LfOKIQk5IYtyScg= github.com/rs/zerolog v1.19.0/go.mod h1:IzD0RJ65iWH0w97OQQebJEvTZYvsCUm9WVLWBQrJRjo= +github.com/russross/blackfriday v1.5.2 h1:HyvC0ARfnZBqnXwABFeSZHpKvJHJJfPz81GNueLj0oo= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e h1:hUGyBE/4CXRPThr4b6kt+f1CN90no4Fs5CNrYOKYSIg= github.com/samuel/go-parser v0.0.0-20130731160455-ca8abbf65d0e/go.mod h1:Sb6li54lXV0yYEjI4wX8cucdQ9gqUJV3+Ngg3l9g30I= +github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54 h1:jbchLJWyhKcmOjkbC4zDvT/n5EEd7g6hnnF760rEyRA= github.com/samuel/go-thrift v0.0.0-20140522043831-2187045faa54/go.mod h1:Vrkh1pnjV9Bl8c3P9zH0/D4NlOHWP5d4/hF4YTULaec= github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b h1:jUK33OXuZP/l6babJtnLo1qsGvq6G9so9KMflGAm4YA= github.com/sanathkr/go-yaml v0.0.0-20170819195128-ed9d249f429b/go.mod h1:8458kAagoME2+LN5//WxE71ysZ3B7r22fdgb7qVmXSY= github.com/sanathkr/yaml v0.0.0-20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= +github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522 h1:39BJIaZIhIBmXATIhdlTBlTQpAiGXHnz17CrO7vF2Ss= github.com/sanathkr/yaml v1.0.1-0.20170819201035-0056894fa522/go.mod h1:tQTYKOQgxoH3v6dEmdHiz4JG+nbxWwM5fgPQUpSZqVQ= github.com/santhosh-tekuri/jsonschema v1.2.4 h1:hNhW8e7t+H1vgY+1QeEQpveR6D4+OwKPXCfD2aieJis= github.com/santhosh-tekuri/jsonschema v1.2.4/go.mod h1:TEAUOeZSmIxTTuHatJzrvARHiuO9LYd+cIxzgEHCQI4= +github.com/satori/go.uuid v1.2.0 h1:0uYX9dsZ2yD7q2RtLRtPSdGDWzjeM3TbMJP9utgA0ww= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= github.com/sergi/go-diff v1.1.0 h1:we8PVUC3FE2uYfodKH/nBHMSetSfHDR6scGdBi+erh0= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/shirou/gopsutil v2.19.11+incompatible h1:lJHR0foqAjI4exXqWsU3DbH7bX1xvdhGdnXTIARA9W4= github.com/shirou/gopsutil 
v2.19.11+incompatible/go.mod h1:5b4v6he4MtMOwMlS0TUMTu2PcXUg8+E1lC7eC3UO/RA= +github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= +github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a h1:pa8hGb/2YqsZKovtsgrwcDH1RZhVbTKCjLp47XpqCDs= github.com/smartystreets/goconvey v0.0.0-20190330032615-68dc04aab96a/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72 h1:qLC7fQah7D6K1B0ujays3HV9gkFtllcxhzImRR7ArPQ= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= github.com/spaolacci/murmur3 v1.1.0/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/afero v1.2.2 h1:5jhuqJyZCZf2JRofRvN/nIFgIWNzPa3/Vz8mYylgbWc= github.com/spf13/afero v1.2.2/go.mod h1:9ZxEEn6pIJ8Rxe320qSDBk6AsU0r9pR7Q4OcevTdifk= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v0.0.0-20170130214245-9ff6c6923cff/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2 h1:VUFqw5KcqRf7i70GOzW7N+Q7+gxVBkSSqiXB12+JQ4M= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0 h1:Hbg2NidpLE8veEBkEZTL3CvlkUIVzuU9jDplZO54c48= github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/testify v1.1.5-0.20170601210322-f6abca593680/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= 
-github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.0/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b h1:X/8hkb4rQq3+QuOxpJK7gWmAXmZucF0EI1s1BfBLq6U= github.com/tsg/go-daemon v0.0.0-20200207173439-e704b93fd89b/go.mod h1:jAqhj/JBVC1PwcLTWd6rjQyGyItxxrhpiBl8LSuAGmw= +github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786 h1:B/IVHYiI0d04dudYw+CvCAGqSMq8d0yWy56eD6p85BQ= github.com/tsg/gopacket v0.0.0-20200626092518-2ab8e397a786/go.mod h1:RIkfovP3Y7my19aXEjjbNd9E5TlHozzAyt7B8AaEcwg= +github.com/ugorji/go v1.1.8 h1:/D9x7IRpfMHDlizVOgxrag5Fh+/NY+LtI8bsr+AswRA= github.com/ugorji/go v1.1.8/go.mod h1:0lNM99SwWUIRhCXnigEMClngXBk/EmpTXa7mgiewYWA= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/ugorji/go/codec v1.1.8 h1:4dryPvxMP9OtkjIbuNeK2nb27M38XMHLGlfNSNph/5s= github.com/ugorji/go/codec v1.1.8/go.mod h1:X00B19HDtwvKbQY2DcYjvZxKQp8mzrJoQ6EgoIY/D2E= +github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797 h1:OHNw/6pXODJAB32NujjdQO/KIYQ3KAbHQfCzH81XdCs= github.com/urso/diag v0.0.0-20200210123136-21b3cc8eb797/go.mod h1:pNWFTeQ+V1OYT/TzWpnWb6eQBdoXpdx+H+lrH97/Oyo= +github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e h1:NiofbjIUI5gR+ybDsGSVH1fWyjSeDYiYVJHT1+kcsak= github.com/urso/go-bin v0.0.0-20180220135811-781c575c9f0e/go.mod h1:6GfHrdWBQYjFRIznu7XuQH4lYB2w8nO4bnImVKkzPOM= +github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230 h1:Ft1EJ6JL0F/RV6o2qJ1Be+wYxjYUSfRA3srfHgSgojc= github.com/urso/magetools v0.0.0-20190919040553-290c89e0c230/go.mod h1:DFxTNgS/ExCGmmjVjSOgS2WjtfjKXgCyDzAFgbtovSA= +github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4 h1:hhA8EBThzz9PztawVTycKvfETVuBqxAQ5keFlAVtbAw= github.com/urso/qcgen v0.0.0-20180131103024-0b059e7db4f4/go.mod h1:RspW+E2Yb7Fs7HclB2tiDaiu6Rp41BiIG4Wo1YaoXGc= +github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec h1:HkZIDJrMKZHPsYhmH2XjTTSk1pbMCFfpxSnyzZUFm+k= github.com/urso/sderr v0.0.0-20200210124243-c2a16f3d43ec/go.mod h1:Wp40HwmjM59FkDIVFfcCb9LzBbnc0XAMp8++hJuWvSU= +github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41 h1:NeNpIvfvaFOh0BH7nMEljE5Rk/VJlxhm58M41SeOD20= github.com/vmware/govmomi v0.0.0-20170802214208-2cad15190b41/go.mod h1:URlwyTFZX72RmxtxuaFL2Uj3fD1JTvZdx59bHWk6aFU= +github.com/xanzy/go-gitlab v0.22.3 h1:/rNlZ2hquUWNc6rJdntVM03tEOoTmnZ1lcNyJCl0WlU= github.com/xanzy/go-gitlab v0.22.3/go.mod h1:t4Bmvnxj7k37S4Y17lfLx+nLqkf/oQwT2HagfWKv5Og= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c h1:u40Z8hqBAAQyv+vATcGgV0YCnDjqSL7/q/JyPhhJSPk= github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0 h1:d9X0esnoa3dFsV0FG35rAT0RIhYFlPq7MiP+DW89La0= github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb h1:zGWFAtiMcyryUHoUjUJX0/lt1H2+i2Ka2n+D3DImSNo= 
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb/go.mod h1:N2zxlSyiKSe5eX1tZViRH5QA0qijqEDrYZiPEAiq3wU= +github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 h1:EzJWgHovont7NscjpAxXsDA8S8BMYve8Y5+7cuRE7R0= github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1:GwrjFmJcFw6At/Gs6z4yjiIwzuJ1/+UwLxMQDVQXShQ= +github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56 h1:yhqBHs09SmmUoNOHc9jgK4a60T3XFRtPAkYxVnqgY50= github.com/xeipuuv/gojsonschema v0.0.0-20181112162635-ac52e6811b56/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 h1:ESFSdwYZvkeru3RtdrYueztKhOBCSAAzS4Gf+k0tEow= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1 h1:ruQGxdhGHe7FWOJPT0mKs5+pD2Xs1Bm/kdGlHO04FmM= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7 h1:0gYLpmzecnaDCoeWxSfEJ7J1b6B/67+NV++4HKQXx+Y= github.com/yuin/gopher-lua v0.0.0-20170403160031-b402f3114ec7/go.mod h1:aEV29XrmTYFr3CiRxZeGHpkvbwq+prZduBqMaascyCU= go.elastic.co/apm v1.7.2/go.mod h1:tCw6CkOJgkWnzEthFN9HUP1uL3Gjc/Ur6m7gRPLaoH0= go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b h1:Sf+V3eV91ZuXjF3824SABFgXU+z4ZEuIX5ikDvt2lCE= go.elastic.co/apm v1.8.1-0.20200909061013-2aef45b9cf4b/go.mod h1:qoOSi09pnzJDh5fKnfY7bPmQgl8yl2tULdOu03xhui0= +go.elastic.co/apm/module/apmelasticsearch v1.7.2 h1:5STGHLZLSeAzxordMc+dFVKiyVtMmxADOV+TgRaXXJg= go.elastic.co/apm/module/apmelasticsearch v1.7.2/go.mod h1:ZyNFuyWdt42GBZkz0SogoLzDBrBGj4orxpiUuxYeYq8= +go.elastic.co/apm/module/apmhttp v1.7.2 h1:2mRh7SwBuEVLmJlX+hsMdcSg9xaielCLElaPn/+i34w= go.elastic.co/apm/module/apmhttp v1.7.2/go.mod h1:sTFWiWejnhSdZv6+dMgxGec2Nxe/ZKfHfz/xtRM+cRY= go.elastic.co/ecszap v0.3.0 h1:Zo/Y4sJLqbWDlqCHI4F4Lzeg0Fs4+n5ldVis4h9xV8w= go.elastic.co/ecszap v0.3.0/go.mod h1:HTUi+QRmr3EuZMqxPX+5fyOdMNfUu5iPebgfhgsTJYQ= @@ -513,12 +782,15 @@ go.elastic.co/fastjson v1.1.0 h1:3MrGBWWVIxe/xvsbpghtkFoPciPhOCmjsR/HfwEeQR4= go.elastic.co/fastjson v1.1.0/go.mod h1:boNGISWMjQsUPy/t6yqt2/1Wx4YNPSe+mZjlyw9vKKI= go.elastic.co/go-licence-detector v0.4.0 h1:it5dP+6LPxLsosdhtbAqk/zJQxzS0QSSpdNkKVuwKMs= go.elastic.co/go-licence-detector v0.4.0/go.mod h1:fSJQU8au4SAgDK+UQFbgUPsXKYNBDv4E/dwWevrMpXU= +go.etcd.io/bbolt v1.3.4 h1:hi1bXHMVrlQh6WwxAy+qZCV/SYIlqo+Ushwdpa4tAKg= go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2 h1:75k/FF0Q2YM8QYo07VPddOLBslDt1MZOdEslOHvmzAs= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.uber.org/atomic v1.5.0 h1:OI5t8sDa1Or+q8AeE+yKeB/SDYioSHAgcVljj9JIETY= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/goleak v1.0.0 h1:qsup4IcBdlmsnGfqyLl4Ntn3C2XCCuKAE7DwHpScyUo= go.uber.org/goleak v1.0.0/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= go.uber.org/multierr v1.3.0 h1:sFPn2GLc3poCkfrpIXGhBD2X0CMIo4Q/zSULXrj/+uc= go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= @@ -528,7 +800,6 @@ go.uber.org/zap 
v1.14.0 h1:/pduUoebOeeJzTDFuoMgC6nRkiasr1sBCIEorly7m4o= go.uber.org/zap v1.14.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2 h1:VklqNMn3ovrHsnt90PveolxSbWFaJdECFbxSq0Mqo2M= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= @@ -542,8 +813,10 @@ golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299 h1:zQpM52jfKHG6II1ISZY1ZcpygvuSFZpLwfluuF89XOg= golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b h1:+qEpEAPhDZ1o0x3tHzZTQDArnOixOzGD9HUJfcg0mb4= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -556,6 +829,7 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu golang.org/x/lint v0.0.0-20200130185559-910be7a94367 h1:0IiAsCRByjO2QjX7ZPkw5oU9x+n1YqRL802rjC0c3Aw= golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028 h1:4+4C/Iv2U4fMZBiMCc98MG1In4gJY5YRhtpDNeDeHWs= golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= @@ -597,12 +871,12 @@ golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4Iltr golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BGiEpg5IG2JkU5FkPIawgtw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58 h1:8gQV6CLnAEikrhgkHFbMAEhagSSnXWGV915qUMm9mrU= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -644,7 +918,6 @@ golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORK golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3 h1:cokOdA+Jmi5PJGXLlLllQSgYigAEfHXJAERHVMaCc2k= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= @@ -692,6 +965,7 @@ google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEt google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -699,6 +973,7 @@ google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7 google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= @@ -709,7 +984,6 @@ google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98 google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb h1:ADPHZzpzM4tk4V4S5cnCrr5SwzvlrPRmqqCuJDB8UTs= google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod 
h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013 h1:+kGHl1aib/qcwaRi1CbqBZ1rk19r85MNUf8HaBghugY= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= @@ -729,20 +1003,22 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.23.0 h1:4MY060fB1DLGMB/7MBTLnwQUY6+F09GEiz6SsrNqyzM= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0 h1:UhZDfRO8JRQru4/+LlLE0BRKGF8L+PICnvYZmx/fEGA= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/jcmturner/aescts.v1 v1.0.1 h1:cVVZBK2b1zY26haWB4vbBiZrfFQnfbTVrE3xZq6hrEw= gopkg.in/jcmturner/aescts.v1 v1.0.1/go.mod h1:nsR8qBOg+OucoIW+WMhB3GspUQXq9XorLnQb9XtvcOo= @@ -754,10 +1030,11 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0 h1:a9tsXlIDD9SKxotJMK3niV7rPZAJeX2aD/0yg3qlI gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuvyavf11/WM= gopkg.in/jcmturner/rpc.v1 v1.1.0 h1:QHIUxTX1ISuAv9dD2wJ9HWQVuWDX/Zc0PfeC2tjc4rU= gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= +gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528 h1:/saqWwm73dLmuzbNhe92F0QsZ/KiFND+esHco2v1hiY= gopkg.in/mgo.v2 v2.0.0-20160818020120-3f83fa500528/go.mod h1:yeKp02qBN3iKW1OzL3MGk2IdtZzaj7SFntXj72NppTA= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 
v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= @@ -767,6 +1044,7 @@ gopkg.in/yaml.v2 v2.3.0 h1:clyUAQHOM3G0M3f5vQj7LuJrETvjVot3Z5el9nffUtU= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -776,15 +1054,25 @@ honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXe honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= howett.net/plist v0.0.0-20181124034731-591f970eefbb h1:jhnBjNi9UFpfpl8YZhA9CrOqpnJdvzuiHsl/dnxl11M= howett.net/plist v0.0.0-20181124034731-591f970eefbb/go.mod h1:vMygbs4qMhSZSc4lCUl2OEE+rDiIIJAIdR4m7MiMcm0= +k8s.io/api v0.19.4 h1:I+1I4cgJYuCDgiLNjKx7SLmIbwgj9w7N7Zr5vSIdwpo= k8s.io/api v0.19.4/go.mod h1:SbtJ2aHCItirzdJ36YslycFNzWADYH3tgOhvBEFtZAk= +k8s.io/apimachinery v0.19.4 h1:+ZoddM7nbzrDCp0T3SWnyxqf8cbWPT2fkZImoyvHUG0= k8s.io/apimachinery v0.19.4/go.mod h1:DnPGDnARWFvYa3pMHgSxtbZb7gpzzAZ1pTfaUNDVlmA= +k8s.io/client-go v0.19.4 h1:85D3mDNoLF+xqpyE9Dh/OtrJDyJrSRKkHmDXIbEzer8= k8s.io/client-go v0.19.4/go.mod h1:ZrEy7+wj9PjH5VMBCuu/BDlvtUAku0oVFk4MmnW9mWA= +k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac h1:sAvhNk5RRuc6FNYGqe7Ygz3PSo/2wGWbulskmzRX8Vs= k8s.io/gengo v0.0.0-20200413195148-3a45101e95ac/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog/v2 v2.0.0/go.mod h1:PBfzABfn139FHAV07az/IF9Wp1bkk3vpT2XSJ76fSDE= +k8s.io/klog/v2 v2.2.0 h1:XRvcwJozkgZ1UQJmfMGpvRthQHOvihEhYtDfAaxMz/A= k8s.io/klog/v2 v2.2.0/go.mod h1:Od+F08eJP+W3HUb4pSrPpgp9DGU4GzlpG/TmITuYh/Y= +k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6 h1:+WnxoVtG8TMiudHBSEtrVL1egv36TkkJm+bA8AxicmQ= k8s.io/kube-openapi v0.0.0-20200805222855-6aeccd4b50c6/go.mod h1:UuqjUnNftUyPE5H64/qeyjQoUZhGpeFDVdxjTeEVN2o= +k8s.io/utils v0.0.0-20200729134348-d5654de09c73 h1:uJmqzgNWG7XyClnU/mLPBWwfKKF1K8Hf8whTseBgJcg= k8s.io/utils v0.0.0-20200729134348-d5654de09c73/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA= +rsc.io/binaryregexp v0.2.0 h1:HfqmD5MEmC0zvwBuF187nq9mdnXjXsSivRiXN7SmRkE= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +sigs.k8s.io/structured-merge-diff/v4 v4.0.1 h1:YXTMot5Qz/X1iBRJhAt+vI+HVttY0WkSqqhKxQ0xVbA= sigs.k8s.io/structured-merge-diff/v4 v4.0.1/go.mod h1:bJZC9H9iH24zzfZ/41RGcq60oK1F7G282QMXDPYydCw= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= +sigs.k8s.io/yaml v1.2.0 h1:kr/MCeFWJWTwyaHoR9c8EjH9OumOmoF9YGiZd7lFm/Q= sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= diff --git a/internal/pkg/apikey/apikey_test.go b/internal/pkg/apikey/apikey_test.go new file mode 100644 index 000000000..efd6d0fb6 --- /dev/null +++ b/internal/pkg/apikey/apikey_test.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package apikey
+
+import (
+	"encoding/base64"
+	"github.com/stretchr/testify/assert"
+	"testing"
+)
+
+func TestMonitorLeadership(t *testing.T) {
+	rawToken := " foo:bar"
+	token := base64.StdEncoding.EncodeToString([]byte(rawToken))
+	apiKey, err := NewApiKeyFromToken(token)
+	assert.NoError(t, err)
+	assert.Equal(t, *apiKey, ApiKey{" foo", "bar"})
+	assert.Equal(t, token, apiKey.Token())
+}
diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go
index 938e2bd52..2e47ea6df 100644
--- a/internal/pkg/apikey/invalidate.go
+++ b/internal/pkg/apikey/invalidate.go
@@ -14,7 +14,7 @@ import (
 	"github.com/elastic/go-elasticsearch/v8/esapi"
 )
 
-// Invalidate invalidates the provides API keys by ID.
+// Invalidate invalidates the provided API keys by ID.
 func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string) error {
 
 	payload := struct {
diff --git a/internal/pkg/coordinator/v0_test.go b/internal/pkg/coordinator/v0_test.go
index a60a53c1c..12ebc6966 100644
--- a/internal/pkg/coordinator/v0_test.go
+++ b/internal/pkg/coordinator/v0_test.go
@@ -32,7 +32,8 @@ func TestCoordinatorZero(t *testing.T) {
 
 	go func() {
 		if err := coord.Run(ctx); err != nil && err != context.Canceled {
-			t.Fatal(err)
+			t.Error(err)
+			return
 		}
 	}()
 
diff --git a/internal/pkg/dsl/readme.txt b/internal/pkg/dsl/readme.txt
deleted file mode 100644
index c42c76702..000000000
--- a/internal/pkg/dsl/readme.txt
+++ /dev/null
@@ -1,6 +0,0 @@
-readme.txt
-
-
-Very basic elastic DSL query builder; grossly incomplete; probably broken.
-
-Only the parts that were needed were fleshed out. Needs work.
diff --git a/internal/pkg/env/env.go b/internal/pkg/env/env.go
deleted file mode 100644
index 4ff1833e0..000000000
--- a/internal/pkg/env/env.go
+++ /dev/null
@@ -1,17 +0,0 @@
-// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
-// or more contributor license agreements. Licensed under the Elastic License;
-// you may not use this file except in compliance with the Elastic License.
-
-package env
-
-import (
-	"os"
-)
-
-func GetStr(key, defaultVal string) string {
-	val, ok := os.LookupEnv(key)
-	if !ok {
-		val = defaultVal
-	}
-	return val
-}

From 45105f1f3f78a00b784c0cde8284debb4c20deb0 Mon Sep 17 00:00:00 2001
From: Sean Cunningham
Date: Mon, 8 Mar 2021 15:21:29 -0500
Subject: [PATCH 028/240] Handle artifact request from agent. Return from
 cache if available, otherwise pull record from Elasticsearch directly.

(cherry picked from commit 951c45484aa79ef46020ab5aeac490f682b2ab51)
---
 cmd/fleet/auth.go                      |   5 +-
 cmd/fleet/handleArtifacts.go           | 210 ++++++++++++++++++++++++
 cmd/fleet/handleCheckin.go             |   3 +-
 cmd/fleet/router.go                    |  10 +-
 internal/pkg/cache/cache.go            |  30 ++++
 internal/pkg/dl/artifact.go            |  65 ++++++++
 internal/pkg/dl/constants.go           |   2 +
 internal/pkg/es/mapping.go             |  41 +++++
 internal/pkg/model/schema.go           |  36 +++++
 internal/pkg/throttle/throttle.go      | 154 ++++++++++++++++++
 internal/pkg/throttle/throttle_test.go | 216 +++++++++++++++++++++++++
 model/schema.json                      |  58 +++++++
 12 files changed, 822 insertions(+), 8 deletions(-)
 create mode 100644 cmd/fleet/handleArtifacts.go
 create mode 100644 internal/pkg/dl/artifact.go
 create mode 100644 internal/pkg/throttle/throttle.go
 create mode 100644 internal/pkg/throttle/throttle_test.go

diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go
index 88ae83b61..14b9e16bc 100644
--- a/cmd/fleet/auth.go
+++ b/cmd/fleet/auth.go
@@ -23,6 +23,7 @@ const (
 )
 
 var ErrApiKeyNotEnabled = errors.New("APIKey not enabled")
+var ErrAgentCorrupted = errors.New("agent record corrupted")
 
 func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (*apikey.ApiKey, error) {
 
@@ -79,11 +80,11 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo
 
 	// validate key alignment
 	if agent.AccessApiKeyId != key.Id {
-		log.Debug().
+		log.Info().
 			Err(ErrAgentCorrupted).
 			Interface("agent", &agent).
 			Str("key.Id", key.Id).
-			Msg("agent id mismatch")
+			Msg("agent API key id mismatch agent record")
 		return nil, ErrAgentCorrupted
 	}
 
diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go
new file mode 100644
index 000000000..7fa452ba2
--- /dev/null
+++ b/cmd/fleet/handleArtifacts.go
@@ -0,0 +1,210 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package fleet
+
+import (
+	"context"
+	"encoding/base64"
+	"encoding/hex"
+	"encoding/json"
+	"errors"
+	"net/http"
+	"time"
+
+	"github.com/elastic/fleet-server/v7/internal/pkg/bulk"
+	"github.com/elastic/fleet-server/v7/internal/pkg/cache"
+	"github.com/elastic/fleet-server/v7/internal/pkg/dl"
+	"github.com/elastic/fleet-server/v7/internal/pkg/model"
+	"github.com/elastic/fleet-server/v7/internal/pkg/throttle"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/rs/zerolog/log"
+)
+
+const (
+	defaultCacheTTL    = time.Hour * 24 // TODO: configurable
+	defaultMaxParallel = 8              // TODO: configurable
+	defaultThrottleTTL = time.Minute    // TODO: configurable
+)
+
+var (
+	artThrottle     = throttle.NewThrottle(defaultMaxParallel)
+	ErrorThrottle   = errors.New("Cannot acquire throttle token")
+	ErrorIdentifier = errors.New("Identifier mismatch")
+	ErrorBadSha2    = errors.New("Malformed sha256")
+)
+
+func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps httprouter.Params) {
+	var (
+		id   = ps.ByName("id")
+		sha2 = ps.ByName("sha2")
+	)
+
+	err := _handleArtifacts(w, r, id, sha2, rt.ct.bulker, rt.ct.cache)
+
+	if err != nil {
+
+		var code int
+		switch err {
+		case dl.ErrNotFound:
+			code = http.StatusNotFound
+		case ErrorThrottle:
+			code = http.StatusTooManyRequests
+		case context.Canceled:
+			code = http.StatusServiceUnavailable
+		default:
+			code = http.StatusBadRequest
+		}
+
+		// TODO: return a 503 on elastic timeout, connection drop
+
+		log.Debug().
+			Err(err).
+ Str("sha2", sha2). + Str("id", id). + Int("code", code). + Msg("Fail artifact") + + http.Error(w, "", code) + } +} + +func _handleArtifacts(w http.ResponseWriter, r *http.Request, id, sha2 string, bulker bulk.Bulk, c cache.Cache) error { + + // Authenticate the APIKey; retrieve agent record. + // NOTE: We are currently not using the agent record aside from logging. + // Eventually we will use the policy id in the record to authorize access to the artifact. + agent, err := authAgent(r, "", bulker, c) + if err != nil { + return err + } + + zlog := log.With(). + Str("id", id). + Str("sha2", sha2). + Str("agent", agent.Id). + Logger() + + // Input validation + if err := validateSha2(sha2); err != nil { + return err + } + + // Grab artifact, whether from cache or elastic. + artifact, err := getArtifact(r.Context(), bulker, c, sha2) + if err != nil { + return err + } + + // TODO: Add per policy authorization on artifacts; + // ie. limit the set of allowed artifacts by policy + + // Validate idenitifer in artifact record is same as url + if artifact.Identifier != id { + err = ErrorIdentifier + zlog.Info(). + Err(err). + Str("artifact_id", artifact.Identifier). + Msg("Identifier mismatch on url") + return err + } + + zlog.Debug(). + Int("sz", len(artifact.Body)). + Int64("decodedSz", artifact.DecodedSize). + Str("compression", artifact.CompressionAlgorithm). + Str("encryption", artifact.EncryptionAlgorithm). + Str("created", artifact.Created). + Msg("Artifact GET") + + // Write the payload + if _, err = w.Write(artifact.Body); err != nil { + zlog.Debug().Err(err).Msg("Fail HTTP write") + return err + } + + return nil +} + +// Return artifact from cache by sha2 or fetch directly from Elastic, updating cache. +func getArtifact(ctx context.Context, bulker bulk.Bulk, c cache.Cache, sha2 string) (*model.Artifact, error) { + + // Check the cache; return immediately if found. + if artifact, ok := c.GetArtifact(sha2); ok { + return &artifact, nil + } + + zlog := log.With().Str("sha2", sha2).Logger() + + // Fetch the artifact from elastic + art, err := fetchArtifact(ctx, bulker, sha2) + + if err != nil { + zlog.Info().Err(err).Msg("Fail retrieve artifact") + return nil, err + } + + // The 'Body' field type is Raw; extract to string. + var srcPayload string + if err = json.Unmarshal(art.Body, &srcPayload); err != nil { + zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") + return nil, err + } + + // Artifact is stored base64 encoded in ElasticSearch. + // Base64 decode the payload before putting in cache to avoid having to decode on each cache hit. + dstPayload, err := base64.StdEncoding.DecodeString(srcPayload) + if err != nil { + zlog.Error().Err(err).Msg("Fail base64 decode artifact") + return nil, err + } + + // Reassign decoded payload before adding to cache + art.Body = dstPayload + + // And cache it + c.SetArtifact(sha2, *art, defaultCacheTTL) + + return art, nil +} + +// Attempt to fetch the artifact down from elastic +// TODO: Design a mechanism to mitigate a DDOS attack on bogus hashes. +// Perhaps have a cache of the most recently used hashes available, and items that aren't +// in the cache can do a lookup but throttle as below. We could update the cache every 10m or so. +func fetchArtifact(ctx context.Context, bulker bulk.Bulk, sha2 string) (*model.Artifact, error) { + // Throttle prevents more than N outstanding requests to elastic globally and per sha2. 
+ if token := artThrottle.Acquire(sha2, defaultThrottleTTL); token == nil { + return nil, ErrorThrottle + } else { + defer token.Release() + } + + start := time.Now() + artifact, err := dl.FindArtifactBySha256(ctx, bulker, sha2) + + log.Debug(). + Err(err). + Str("sha2", sha2). + Dur("rtt", time.Since(start)). + Msg("fetch artifact") + + return artifact, err +} + +func validateSha2(sha2 string) error { + + if len(sha2) != 64 { + log.Info().Str("sha2", sha2).Msg("sha2 bad length") + return ErrorBadSha2 + } + + if _, err := hex.DecodeString(sha2); err != nil { + log.Info().Err(err).Str("sha2", sha2).Msg("sha2 is not hex") + return ErrorBadSha2 + } + + return nil +} diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 7f57d90db..ae4b764d6 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -27,8 +27,7 @@ import ( ) var ( - ErrAgentNotFound = errors.New("agent not found") - ErrAgentCorrupted = errors.New("agent record corrupted") + ErrAgentNotFound = errors.New("agent not found") kCheckinTimeout = 30 * time.Second kLongPollTimeout = 300 * time.Second // 5m diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index fa5e53d70..018c182c1 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -11,10 +11,11 @@ import ( ) const ( - ROUTE_STATUS = "/api/status" - ROUTE_ENROLL = "/api/fleet/agents/:id" - ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" - ROUTE_ACKS = "/api/fleet/agents/:id/acks" + ROUTE_STATUS = "/api/status" + ROUTE_ENROLL = "/api/fleet/agents/:id" + ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" + ROUTE_ACKS = "/api/fleet/agents/:id/acks" + ROUTE_ARTIFACTS = "/api/fleet/artifacts/:id/:sha2" ) type Router struct { @@ -39,5 +40,6 @@ func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, sm policy.SelfMoni router.POST(ROUTE_ENROLL, r.handleEnroll) router.POST(ROUTE_CHECKIN, r.handleCheckin) router.POST(ROUTE_ACKS, r.handleAcks) + router.GET(ROUTE_ARTIFACTS, r.handleArtifacts) return router } diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index b03bb4410..dbfe7a0c3 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -139,3 +139,33 @@ func (c Cache) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost i Dur("ttl", ttl). Msg("EnrollmentApiKey cache SET") } + +func (c Cache) GetArtifact(sha2 string) (model.Artifact, bool) { + scopedKey := "artifact:" + sha2 + if v, ok := c.cache.Get(scopedKey); ok { + log.Trace().Str("sha2", sha2).Msg("Artifact cache HIT") + key, ok := v.(model.Artifact) + + if !ok { + log.Error().Str("sha2", sha2).Msg("Artifact cache cast fail") + return model.Artifact{}, false + } + return key, ok + } + + log.Trace().Str("sha2", sha2).Msg("Artifact cache MISS") + return model.Artifact{}, false +} + +// TODO: strip body and spool to on disk cache if larger than a size threshold +func (c Cache) SetArtifact(sha2 string, artifact model.Artifact, ttl time.Duration) { + scopedKey := "artifact:" + sha2 + cost := int64(len(artifact.Body)) + ok := c.cache.SetWithTTL(scopedKey, artifact, cost, ttl) + log.Trace(). + Bool("ok", ok). + Str("sha2", sha2). + Int64("cost", cost). + Dur("ttl", ttl). + Msg("Artifact cache SET") +} diff --git a/internal/pkg/dl/artifact.go b/internal/pkg/dl/artifact.go new file mode 100644 index 000000000..7ee501d74 --- /dev/null +++ b/internal/pkg/dl/artifact.go @@ -0,0 +1,65 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package dl + +import ( + "context" + "encoding/json" + "errors" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dsl" + "github.com/elastic/fleet-server/v7/internal/pkg/model" +) + +const ( + artifactsIndexName = ".fleet-artifacts" +) + +var ( + QueryArtifactBySha2 = prepareQueryArtifactBySha2() + ErrConflict = errors.New("Fail multiple artifacts for the same sha256") +) + +func prepareQueryArtifactBySha2() *dsl.Tmpl { + root := dsl.NewRoot() + tmpl := dsl.NewTmpl() + + root.Query().Bool().Filter().Term(FieldEncodedSha256, tmpl.Bind(FieldEncodedSha256), nil) + tmpl.MustResolve(root) + return tmpl +} + +func FindArtifactBySha256(ctx context.Context, bulker bulk.Bulk, sha2 string) (*model.Artifact, error) { + + res, err := SearchWithOneParam( + ctx, + bulker, + QueryArtifactBySha2, + artifactsIndexName, + FieldEncodedSha256, + sha2, + ) + + if err != nil { + return nil, err + } + + if len(res.Hits) == 0 { + return nil, ErrNotFound + } + + if len(res.Hits) > 1 { + return nil, ErrConflict + } + + // deserialize + var artifact model.Artifact + if err = json.Unmarshal(res.Hits[0].Source, &artifact); err != nil { + return nil, err + } + + return &artifact, nil +} diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index b585af688..06164f821 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -34,6 +34,8 @@ const ( FieldActive = "active" FieldUpdatedAt = "updated_at" FieldUnenrolledAt = "unenrolled_at" + + FieldEncodedSha256 = "encodedSha256" ) // Public constants diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index e2581482b..5e27297a7 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -171,6 +171,47 @@ const ( } }` + // Artifact An artifact served by Fleet + MappingArtifact = `{ + "properties": { + "body": { + "enabled" : false, + "type": "object" + }, + "compressionAlgorithm": { + "type": "keyword" + }, + "created": { + "type": "date" + }, + "decodedSha256": { + "type": "keyword" + }, + "decodedSize": { + "type": "integer" + }, + "encodedSha256": { + "type": "keyword" + }, + "encodedSize": { + "type": "integer" + }, + "encryptionAlgorithm": { + "type": "keyword" + }, + "identifier": { + "type": "keyword" + } + } +}` + + // Body Encoded artifact data + MappingBody = `{ + "properties": { + + } +}` + // Data The opaque payload. 
MappingData = `{ "properties": { diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 60b2920a6..2f870e814 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -170,6 +170,42 @@ type AgentMetadata struct { Version string `json:"version"` } +// Artifact An artifact served by Fleet +type Artifact struct { + ESDocument + + // Encoded artifact data + Body json.RawMessage `json:"body"` + + // Name of compression algorithm applied to artifact + CompressionAlgorithm string `json:"compressionAlgorithm"` + + // Timestamp artifact was created + Created string `json:"created"` + + // SHA256 of artifact before encoding has been applied + DecodedSha256 string `json:"decodedSha256"` + + // Size of artifact before encoding has been applied + DecodedSize int64 `json:"decodedSize"` + + // SHA256 of artifact after encoding has been applied + EncodedSha256 string `json:"encodedSha256"` + + // Size of artifact after encoding has been applied + EncodedSize int64 `json:"encodedSize"` + + // Name of encryption algorithm applied to artifact + EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + + // Human readable artifact identifier + Identifier string `json:"identifier"` +} + +// Body Encoded artifact data +type Body struct { +} + // Data The opaque payload. type Data struct { } diff --git a/internal/pkg/throttle/throttle.go b/internal/pkg/throttle/throttle.go new file mode 100644 index 000000000..b58ef8470 --- /dev/null +++ b/internal/pkg/throttle/throttle.go @@ -0,0 +1,154 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package throttle + +import ( + "github.com/rs/zerolog/log" + "sync" + "time" +) + +type Token struct { + id uint64 + key string + throttle *Throttle +} + +type tstate struct { + id uint64 + expire time.Time +} + +type Throttle struct { + mut sync.Mutex + maxParallel int + tokenCnt uint64 + tokenMap map[string]tstate +} + +// Throttle provides two controls: +// 1) Only one Token per key at a time can be acquired. Token expires if not released by ttl. +// 2) Only max unexpired tokens acquired at any one time. + +func NewThrottle(max int) *Throttle { + return &Throttle{ + maxParallel: max, + tokenMap: make(map[string]tstate), + } +} + +func (tt *Throttle) Acquire(key string, ttl time.Duration) *Token { + var token *Token + + tt.mut.Lock() + defer tt.mut.Unlock() + + if tt.checkAtMaxPending(key) { + log.Trace(). + Str("key", key). + Int("max", tt.maxParallel). + Int("szMap", len(tt.tokenMap)). + Msg("Throttle fail acquire on max pending") + return nil + } + + // Is there already a pending request on this key? + state, ok := tt.tokenMap[key] + + // If there's nohting pending on 'key' or previous timed out create token + + now := time.Now() + if !ok || state.expire.Before(now) { + tt.tokenCnt += 1 + + token = &Token{ + id: tt.tokenCnt, + key: key, + throttle: tt, + } + + state := tstate{ + id: token.id, + expire: now.Add(ttl), + } + + tt.tokenMap[key] = state + + log.Trace(). + Str("key", key). + Uint64("token", token.id). + Time("expire", state.expire). + Msg("Throttle acquired") + + return token + } + + log.Trace(). + Str("key", key). 
+ Msg("Throttle fail acquire on existing token") + + return token +} + +// WARNING: Assumes mutex already held +func (tt *Throttle) checkAtMaxPending(key string) bool { + + // Are we already at max parallel? + if tt.maxParallel == 0 || len(tt.tokenMap) < tt.maxParallel { + return false + } + + now := time.Now() + + // Try to eject the target key first + if state, ok := tt.tokenMap[key]; ok && state.expire.Before(now) { + delete(tt.tokenMap, key) + log.Trace(). + Str("key", key). + Msg("Ejected target token on expiration") + + return false + } + + // Scan through map looking for something to expire. + // Not very efficient, O(N), but perhaps not worth optimizing + var found bool + for skey, state := range tt.tokenMap { + if state.expire.Before(now) { + found = true + delete(tt.tokenMap, skey) + log.Trace(). + Str("key", key). + Msg("Ejected token on expiration") + break + } + } + + return !found +} + +func (tt *Throttle) release(id uint64, key string) bool { + + tt.mut.Lock() + defer tt.mut.Unlock() + + state, ok := tt.tokenMap[key] + if !ok { + log.Trace().Uint64("id", id).Str("key", key).Msg("Token not found to release") + return false + } + + if state.id == id { + log.Trace().Uint64("id", id).Str("key", key).Msg("Token released") + delete(tt.tokenMap, key) + return true + } + + return false +} + +func (t Token) Release() bool { + return t.throttle.release(t.id, t.key) +} diff --git a/internal/pkg/throttle/throttle_test.go b/internal/pkg/throttle/throttle_test.go new file mode 100644 index 000000000..d2228e475 --- /dev/null +++ b/internal/pkg/throttle/throttle_test.go @@ -0,0 +1,216 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package throttle + +import ( + "math/rand" + "strconv" + "testing" + "time" +) + +func TestThrottleZero(t *testing.T) { + + // Zero max parallel means we can acquire as many as we want, + // but still cannot acquire existing that has not timed out + throttle := NewThrottle(0) + + N := rand.Intn(2048) + 10 + + var tokens []*Token + for i := 0; i < N; i++ { + + key := strconv.Itoa(i) + + // Acquire token for key with long timeout so doesn't trip unit test + token1 := throttle.Acquire(key, time.Hour) + if token1 == nil { + t.Fatal("Acquire failed") + } + tokens = append(tokens, token1) + + // Second acquire should fail because we have not released the orginal token, + // or possibly if i == N-1 we could max parallel + token2 := throttle.Acquire(key, time.Hour) + if token2 != nil { + t.Error("Expected second acquire to fail on conflict") + } + } + + // Validate again that all tokens are blocked after allocating N + for i := 0; i < N; i++ { + + key := strconv.Itoa(i) + + // Acquire should fail because we have not released the orginal token, + token := throttle.Acquire(key, time.Hour) + if token != nil { + t.Error("Expected acquire to fail on conflict") + } + } + + for i, token := range tokens { + + found := token.Release() + if !found { + t.Error("Expect token to be found") + } + + // Second release should return false + found = token.Release() + if found { + t.Error("Expect token to not found on second release") + } + + // We should now be able to to acquire + key := strconv.Itoa(i) + + token = throttle.Acquire(key, time.Hour) + if token == nil { + t.Fatal("Acquire failed") + } + + found = token.Release() + if !found { + t.Error("Expect token to be found") + } + } +} + +func TestThrottleN(t *testing.T) { + + for N := 1; N < 11; N++ { + + throttle := NewThrottle(N) + + var tokens []*Token + for i := 0; i < N; i++ { + + key := strconv.Itoa(i) + + // Acquire token for key with long timeout so doesn't trip unit test + token1 := throttle.Acquire(key, time.Hour) + if token1 == nil { + t.Fatal("Acquire failed") + } + tokens = append(tokens, token1) + + // Second acquire should fail because we have not released the orginal token, + // or possibly if i == N-1 we could max parallel + token2 := throttle.Acquire(key, time.Hour) + if token2 != nil { + t.Error("Expected second acquire to fail on conflict") + } + } + + // Any subsequent request should fail because at max + try := rand.Intn(1000) + 1 + for i := 0; i < try; i++ { + + key := strconv.Itoa(N + i) + + token1 := throttle.Acquire(key, time.Hour) + if token1 != nil { + t.Fatal("Expect acquire to fail on max tokens") + } + } + + // Release one at a time, validate that we can reacquire + for i, token := range tokens { + + found := token.Release() + if !found { + t.Error("Expect token to be found") + } + + // Second release should return false + found = token.Release() + if found { + t.Error("Expect token to not found on second release") + } + + // We should now be able to to acquire + key := strconv.Itoa(i) + + token = throttle.Acquire(key, time.Hour) + if token == nil { + t.Fatal("Acquire failed") + } + + found = token.Release() + if !found { + t.Error("Expect token to be found") + } + } + } +} + +func TestThrottleExpireIdentity(t *testing.T) { + throttle := NewThrottle(1) + + key := "xxx" + token := throttle.Acquire(key, time.Second) + + // Should *NOT* be able to re-acquire until TTL + token2 := throttle.Acquire(key, time.Hour) + if token2 != nil { + t.Error("Expected second acquire to fail on conflict") + } + + time.Sleep(time.Second) + + // 
Should be able to re-acquire on expiration + token3 := throttle.Acquire(key, time.Hour) + if token3 == nil { + t.Error("Expected third aquire to succeed") + } + + // Original token should fail release + found := token.Release() + if found { + t.Error("Expected token to have expired") + } + + // However, third token should release fine + found = token3.Release() + if !found { + t.Error("Expect recently acquired token to release cleanly") + } +} + +// Test that a token from a different key is expired when at max +func TestThrottleExpireAtMax(t *testing.T) { + throttle := NewThrottle(1) + + key1 := "xxx" + token1 := throttle.Acquire(key1, time.Second) + + // Should be at max, cannot acquire different key + key2 := "yyy" + token2 := throttle.Acquire(key2, time.Hour) + if token2 != nil { + t.Error("Expected second acquire to fail on max") + } + + time.Sleep(time.Second) + + // Should be able acquire second after timeout + token2 = throttle.Acquire(key2, time.Hour) + if token2 == nil { + t.Error("Expected third aquire to succeed") + } + + // Original token should fail release + found := token1.Release() + if found { + t.Error("Expected token to have expired") + } + + // However, third token should release fine + found = token2.Release() + if !found { + t.Error("Expect recently acquired token2 to release cleanly") + } +} diff --git a/model/schema.json b/model/schema.json index 7fdccf79a..c332a0aba 100644 --- a/model/schema.json +++ b/model/schema.json @@ -1,6 +1,7 @@ { "$schema": "http://json-schema.org/draft-07/schema#", "definitions": { + "action": { "title": "Agent action", "description": "An Elastic Agent action", @@ -125,6 +126,63 @@ "version" ] }, + + "artifact": { + "title": "Artifact", + "description": "An artifact served by Fleet", + "type": "object", + "properties": { + "identifier": { + "description": "Human readable artifact identifier", + "type": "string" + }, + "compressionAlgorithm": { + "description": "Name of compression algorithm applied to artifact", + "type": "string" + }, + "encryptionAlgorithm": { + "description": "Name of encryption algorithm applied to artifact", + "type": "string" + }, + "encodedSha256": { + "description": "SHA256 of artifact after encoding has been applied", + "type": "string" + }, + "encodedSize": { + "description": "Size of artifact after encoding has been applied", + "type": "integer" + }, + "decodedSha256": { + "description": "SHA256 of artifact before encoding has been applied", + "type": "string" + }, + "decodedSize": { + "description": "Size of artifact before encoding has been applied", + "type": "integer" + }, + "created": { + "description": "Timestamp artifact was created", + "type": "string", + "format": "date-time" + }, + "body": { + "description": "Encoded artifact data", + "type": "object", + "format": "raw" + } + }, + "required": [ + "identifier", + "compressionAlgorithm", + "encodedSha256", + "encodedSize", + "decodedSha256", + "decodedSize", + "created", + "body" + ] + }, + "host-metadata": { "title": "Host Metadata", "description": "The host metadata for the Elastic Agent", From 584bcc5704ec93b96366b3100ca04cc0b23599c9 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Tue, 16 Mar 2021 16:47:24 -0400 Subject: [PATCH 029/240] Search/Cache artifact by ident/hash. Tolerate duplicate records. Validate hash. Kibana API uses decoded sha2 in relative URL, not the encoded sha2. 
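As an illustrative sketch (not part of the patch itself): an agent now fetches an
artifact with a request of the form

    GET /api/fleet/artifacts/<identifier>/<decoded_sha256>

where <decoded_sha256> is the hash of the artifact before encoding has been applied.
The handler validates the hash string, looks the record up by the
(identifier, decoded_sha256) pair, and caches the decoded body under a key of the
form "artifact:<identifier>:<decoded_sha256>", so duplicate records for the same
hash still resolve to a single cached entry.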
(cherry picked from commit 8c6cf79fd37b328388eb39326c8feb82a1a719e7) --- cmd/fleet/auth.go | 15 ++- cmd/fleet/handleArtifacts.go | 238 +++++++++++++++++++++++------------ cmd/fleet/handleEnroll.go | 1 + cmd/fleet/router.go | 7 ++ internal/pkg/cache/cache.go | 19 +-- internal/pkg/dl/artifact.go | 39 +++--- internal/pkg/dl/constants.go | 4 +- 7 files changed, 218 insertions(+), 105 deletions(-) diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index 14b9e16bc..6e9752869 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -25,6 +25,9 @@ const ( var ErrApiKeyNotEnabled = errors.New("APIKey not enabled") var ErrAgentCorrupted = errors.New("agent record corrupted") +// This authenticates that the provided API key exists and is enabled. +// WARNING: This does not validate that the api key is valid for the Fleet Domain. +// An additional check must be executed to validate it is not a random api key. func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (*apikey.ApiKey, error) { key, err := apikey.ExtractAPIKey(r) @@ -41,16 +44,17 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* info, err := key.Authenticate(r.Context(), client) if err != nil { - log.Error(). + log.Info(). Err(err). - Dur("tdiff", time.Since(start)). + Str("id", key.Id). + Dur("rtt", time.Since(start)). Msg("ApiKey fail authentication") return nil, err } log.Trace(). Str("id", key.Id). - Dur("tdiff", time.Since(start)). + Dur("rtt", time.Since(start)). Str("UserName", info.UserName). Strs("Roles", info.Roles). Bool("enabled", info.Enabled). @@ -61,6 +65,11 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* c.SetApiKey(*key, kAPIKeyTTL) } else { err = ErrApiKeyNotEnabled + log.Info(). + Err(err). + Str("id", key.Id). + Dur("rtt", time.Since(start)). + Msg("ApiKey not enabled") } return key, err diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 7fa452ba2..27a6f1917 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -5,11 +5,14 @@ package fleet import ( + "bytes" "context" + "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" + "io" "net/http" "time" @@ -20,98 +23,151 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/throttle" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) const ( - defaultCacheTTL = time.Hour * 24 // TODO: configurable defaultMaxParallel = 8 // TODO: configurable + defaultCacheTTL = time.Hour * 24 // TODO: configurable defaultThrottleTTL = time.Minute // TODO: configurable ) var ( - artThrottle = throttle.NewThrottle(defaultMaxParallel) - ErrorThrottle = errors.New("Cannot acquire throttle token") - ErrorIdentifier = errors.New("Identifier mismatch") - ErrorBadSha2 = errors.New("Malformed sha256") + artThrottle = throttle.NewThrottle(defaultMaxParallel) + ErrorThrottle = errors.New("cannot acquire throttle token") + ErrorBadSha2 = errors.New("malformed sha256") + ErrorRecord = errors.New("artifact record mismatch") + ErrorMismatchSha2 = errors.New("mismatched sha256") ) func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + start := time.Now() + var ( - id = ps.ByName("id") - sha2 = ps.ByName("sha2") + id = ps.ByName("id") // Identifier in the artifact record + sha2 = ps.ByName("sha2") // DecodedSha256 in the artifact record ) - err := _handleArtifacts(w, r, id, sha2, rt.ct.bulker, rt.ct.cache) + zlog := log.With(). 
+ Str("id", id). + Str("sha2", sha2). + Str("remoteAddr", r.RemoteAddr). + Logger() + // Authenticate the APIKey; retrieve agent record. + // Note: This is going to be a bit slow even if we hit the cache on the api key. + // In order to validate that the agent still has that api key, we fetch the agent record from elastic. + agent, err := authAgent(r, "", rt.ct.bulker, rt.ct.cache) if err != nil { - - var code int - switch err { - case dl.ErrNotFound: - code = http.StatusNotFound - case ErrorThrottle: - code = http.StatusTooManyRequests - case context.Canceled: - code = http.StatusServiceUnavailable - default: - code = http.StatusBadRequest - } - - // TODO: return a 503 on elastic timeout, connection drop - - log.Debug(). + code := http.StatusUnauthorized + zlog.Info(). Err(err). - Str("sha2", sha2). - Str("id", id). Int("code", code). - Msg("Fail artifact") + Msg("Fail auth") http.Error(w, "", code) + return + } + + zlog = zlog.With(). + Str("APIKeyId", agent.AccessApiKeyId). + Str("agentId", agent.Id). + Logger() + + ah := artHandler{ + zlog: zlog, + bulker: rt.ct.bulker, + c: rt.ct.cache, } -} -func _handleArtifacts(w http.ResponseWriter, r *http.Request, id, sha2 string, bulker bulk.Bulk, c cache.Cache) error { + rdr, err := ah.handle(r.Context(), agent, id, sha2) + + var nWritten int64 + if err == nil { + nWritten, err = io.Copy(w, rdr) + zlog.Trace(). + Err(err). + Int64("nWritten", nWritten). + Dur("rtt", time.Since(start)). + Msg("Response sent") + } - // Authenticate the APIKey; retrieve agent record. - // NOTE: We are currently not using the agent record aside from logging. - // Eventually we will use the policy id in the record to authorize access to the artifact. - agent, err := authAgent(r, "", bulker, c) if err != nil { - return err + code, lvl := assessError(err) + + zlog.WithLevel(lvl). + Err(err). + Int("code", code). + Int64("nWritten", nWritten). + Dur("rtt", time.Since(start)). + Msg("Fail handle artifact") + + http.Error(w, "", code) } +} - zlog := log.With(). - Str("id", id). - Str("sha2", sha2). - Str("agent", agent.Id). - Logger() +func assessError(err error) (int, zerolog.Level) { + lvl := zerolog.DebugLevel + + // TODO: return a 503 on elastic timeout, connection drop + + var code int + switch err { + case dl.ErrNotFound: + // Artifact not found indicates a race condition upstream + // or an attack on the fleet server. Either way it should + // show up in the logs at a higher level than debug + lvl = zerolog.WarnLevel + code = http.StatusNotFound + case ErrorThrottle: + code = http.StatusTooManyRequests + case context.Canceled: + code = http.StatusServiceUnavailable + default: + code = http.StatusBadRequest + } + + return code, lvl +} + +type artHandler struct { + zlog zerolog.Logger + bulker bulk.Bulk + c cache.Cache +} + +func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 string) (io.Reader, error) { // Input validation - if err := validateSha2(sha2); err != nil { - return err + if err := validateSha2String(sha2); err != nil { + return nil, err + } + + // Determine whether the agent should have access to this artifact + if err := ah.authorizeArtifact(ctx, agent, id, sha2); err != nil { + ah.zlog.Warn().Err(err).Msg("Unauthorized GET on artifact") + return nil, err } // Grab artifact, whether from cache or elastic. - artifact, err := getArtifact(r.Context(), bulker, c, sha2) + artifact, err := ah.getArtifact(ctx, id, sha2) if err != nil { - return err + return nil, err } - // TODO: Add per policy authorization on artifacts; - // ie. 
limit the set of allowed artifacts by policy - - // Validate idenitifer in artifact record is same as url - if artifact.Identifier != id { - err = ErrorIdentifier - zlog.Info(). + // Sanity check; just in case something underneath is misbehaving + if artifact.Identifier != id || artifact.DecodedSha256 != sha2 { + err = ErrorRecord + ah.zlog.Info(). Err(err). Str("artifact_id", artifact.Identifier). + Str("artifact_sha2", artifact.DecodedSha256). Msg("Identifier mismatch on url") - return err + return nil, err } - zlog.Debug(). + ah.zlog.Debug(). Int("sz", len(artifact.Body)). Int64("decodedSz", artifact.DecodedSize). Str("compression", artifact.CompressionAlgorithm). @@ -120,61 +176,76 @@ func _handleArtifacts(w http.ResponseWriter, r *http.Request, id, sha2 string, b Msg("Artifact GET") // Write the payload - if _, err = w.Write(artifact.Body); err != nil { - zlog.Debug().Err(err).Msg("Fail HTTP write") - return err - } + rdr := bytes.NewReader(artifact.Body) + return rdr, nil +} - return nil +// TODO: Pull the policy record for this agent and validate that the +// requested artifact is assigned to this policy. This will prevent +// agents from retrieving artifacts that they do not have access to. +// Note that this is racy, the policy could have changed to allow an +// artifact before this instantiation of FleetServer has its local +// copy updated. Take the race conditions into consideration. +// +// Initial implementation is dependent on security by obscurity; ie. +// it should be difficult for an attacker to guess a guid. +func (ah artHandler) authorizeArtifact(ctx context.Context, agent *model.Agent, ident, sha2 string) error { + return nil // TODO } -// Return artifact from cache by sha2 or fetch directly from Elastic, updating cache. -func getArtifact(ctx context.Context, bulker bulk.Bulk, c cache.Cache, sha2 string) (*model.Artifact, error) { +// Return artifact from cache by sha2 or fetch directly from Elastic. +// Update cache on successful retrieval from Elastic. +func (ah artHandler) getArtifact(ctx context.Context, ident, sha2 string) (*model.Artifact, error) { // Check the cache; return immediately if found. - if artifact, ok := c.GetArtifact(sha2); ok { + if artifact, ok := ah.c.GetArtifact(ident, sha2); ok { return &artifact, nil } - zlog := log.With().Str("sha2", sha2).Logger() - // Fetch the artifact from elastic - art, err := fetchArtifact(ctx, bulker, sha2) + art, err := ah.fetchArtifact(ctx, ident, sha2) if err != nil { - zlog.Info().Err(err).Msg("Fail retrieve artifact") + ah.zlog.Info().Err(err).Msg("Fail retrieve artifact") return nil, err } // The 'Body' field type is Raw; extract to string. var srcPayload string if err = json.Unmarshal(art.Body, &srcPayload); err != nil { - zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") + ah.zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") return nil, err } // Artifact is stored base64 encoded in ElasticSearch. - // Base64 decode the payload before putting in cache to avoid having to decode on each cache hit. + // Base64 decode the payload before putting in cache + // to avoid having to decode on each cache hit. dstPayload, err := base64.StdEncoding.DecodeString(srcPayload) if err != nil { - zlog.Error().Err(err).Msg("Fail base64 decode artifact") + ah.zlog.Error().Err(err).Msg("Fail base64 decode artifact") + return nil, err + } + + // Validate the sha256 hash; this is just good hygiene. 
+ if err = validateSha2Data(dstPayload, art.EncodedSha256); err != nil { + ah.zlog.Error().Err(err).Msg("Fail sha2 hash validation") return nil, err } - // Reassign decoded payload before adding to cache + // Reassign decoded payload before adding to cache, avoid base64 decode on cache hit. art.Body = dstPayload - // And cache it - c.SetArtifact(sha2, *art, defaultCacheTTL) + // Update the cache. + ah.c.SetArtifact(*art, defaultCacheTTL) return art, nil } -// Attempt to fetch the artifact down from elastic +// Attempt to fetch the artifact from Elastic // TODO: Design a mechanism to mitigate a DDOS attack on bogus hashes. // Perhaps have a cache of the most recently used hashes available, and items that aren't // in the cache can do a lookup but throttle as below. We could update the cache every 10m or so. -func fetchArtifact(ctx context.Context, bulker bulk.Bulk, sha2 string) (*model.Artifact, error) { +func (ah artHandler) fetchArtifact(ctx context.Context, ident, sha2 string) (*model.Artifact, error) { // Throttle prevents more than N outstanding requests to elastic globally and per sha2. if token := artThrottle.Acquire(sha2, defaultThrottleTTL); token == nil { return nil, ErrorThrottle @@ -183,28 +254,39 @@ func fetchArtifact(ctx context.Context, bulker bulk.Bulk, sha2 string) (*model.A } start := time.Now() - artifact, err := dl.FindArtifactBySha256(ctx, bulker, sha2) + artifact, err := dl.FindArtifact(ctx, ah.bulker, ident, sha2) - log.Debug(). + ah.zlog.Info(). Err(err). - Str("sha2", sha2). Dur("rtt", time.Since(start)). Msg("fetch artifact") return artifact, err } -func validateSha2(sha2 string) error { +func validateSha2String(sha2 string) error { if len(sha2) != 64 { - log.Info().Str("sha2", sha2).Msg("sha2 bad length") return ErrorBadSha2 } if _, err := hex.DecodeString(sha2); err != nil { - log.Info().Err(err).Str("sha2", sha2).Msg("sha2 is not hex") return ErrorBadSha2 } return nil } + +func validateSha2Data(data []byte, sha2 string) error { + src, err := hex.DecodeString(sha2) + if err != nil { + return err + } + + sum := sha256.Sum256(data) + if !bytes.Equal(sum[:], src) { + return ErrorMismatchSha2 + } + + return nil +} diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 3b154c4c2..fc0e44ec9 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -133,6 +133,7 @@ func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { return nil, err } + // Validate that an enrollment record exists for a key with this id. 
erec, err := et.fetchEnrollmentKeyRecord(r.Context(), key.Id) if err != nil { return nil, err diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index 018c182c1..7fb76645b 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -16,6 +16,9 @@ const ( ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" ROUTE_ACKS = "/api/fleet/agents/:id/acks" ROUTE_ARTIFACTS = "/api/fleet/artifacts/:id/:sha2" + + // Support previous relative path exposed in Kibana until all feature flags are flipped + ROUTE_ARTIFACTS_DEPRECATED = "/api/endpoint/artifacts/download/:id/:sha2" ) type Router struct { @@ -41,5 +44,9 @@ func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, sm policy.SelfMoni router.POST(ROUTE_CHECKIN, r.handleCheckin) router.POST(ROUTE_ACKS, r.handleAcks) router.GET(ROUTE_ARTIFACTS, r.handleArtifacts) + + // deprecated: TODO: remove + router.GET(ROUTE_ARTIFACTS_DEPRECATED, r.handleArtifacts) + return router } diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index dbfe7a0c3..de77941dd 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -5,6 +5,7 @@ package cache import ( + "fmt" "time" "github.com/dgraph-io/ristretto" @@ -140,10 +141,14 @@ func (c Cache) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost i Msg("EnrollmentApiKey cache SET") } -func (c Cache) GetArtifact(sha2 string) (model.Artifact, bool) { - scopedKey := "artifact:" + sha2 +func makeArtifactKey(ident, sha2 string) string { + return fmt.Sprintf("artifact:%s:%s", ident, sha2) +} + +func (c Cache) GetArtifact(ident, sha2 string) (model.Artifact, bool) { + scopedKey := makeArtifactKey(ident, sha2) if v, ok := c.cache.Get(scopedKey); ok { - log.Trace().Str("sha2", sha2).Msg("Artifact cache HIT") + log.Trace().Str("key", scopedKey).Msg("Artifact cache HIT") key, ok := v.(model.Artifact) if !ok { @@ -153,18 +158,18 @@ func (c Cache) GetArtifact(sha2 string) (model.Artifact, bool) { return key, ok } - log.Trace().Str("sha2", sha2).Msg("Artifact cache MISS") + log.Trace().Str("key", scopedKey).Msg("Artifact cache MISS") return model.Artifact{}, false } // TODO: strip body and spool to on disk cache if larger than a size threshold -func (c Cache) SetArtifact(sha2 string, artifact model.Artifact, ttl time.Duration) { - scopedKey := "artifact:" + sha2 +func (c Cache) SetArtifact(artifact model.Artifact, ttl time.Duration) { + scopedKey := makeArtifactKey(artifact.Identifier, artifact.DecodedSha256) cost := int64(len(artifact.Body)) ok := c.cache.SetWithTTL(scopedKey, artifact, cost, ttl) log.Trace(). Bool("ok", ok). - Str("sha2", sha2). + Str("key", scopedKey). Int64("cost", cost). Dur("ttl", ttl). 
Msg("Artifact cache SET") diff --git a/internal/pkg/dl/artifact.go b/internal/pkg/dl/artifact.go index 7ee501d74..c7e3f5090 100644 --- a/internal/pkg/dl/artifact.go +++ b/internal/pkg/dl/artifact.go @@ -7,40 +7,42 @@ package dl import ( "context" "encoding/json" - "errors" + + "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/model" ) -const ( - artifactsIndexName = ".fleet-artifacts" -) - var ( - QueryArtifactBySha2 = prepareQueryArtifactBySha2() - ErrConflict = errors.New("Fail multiple artifacts for the same sha256") + QueryArtifactTmpl = prepareQueryArtifact() ) -func prepareQueryArtifactBySha2() *dsl.Tmpl { +func prepareQueryArtifact() *dsl.Tmpl { root := dsl.NewRoot() tmpl := dsl.NewTmpl() - root.Query().Bool().Filter().Term(FieldEncodedSha256, tmpl.Bind(FieldEncodedSha256), nil) + must := root.Query().Bool().Must() + must.Term(FieldDecodedSha256, tmpl.Bind(FieldDecodedSha256), nil) + must.Term(FieldIdentifier, tmpl.Bind(FieldIdentifier), nil) tmpl.MustResolve(root) return tmpl } -func FindArtifactBySha256(ctx context.Context, bulker bulk.Bulk, sha2 string) (*model.Artifact, error) { +func FindArtifact(ctx context.Context, bulker bulk.Bulk, ident, sha2 string) (*model.Artifact, error) { + + params := map[string]interface{}{ + FieldDecodedSha256: sha2, + FieldIdentifier: ident, + } - res, err := SearchWithOneParam( + res, err := Search( ctx, bulker, - QueryArtifactBySha2, - artifactsIndexName, - FieldEncodedSha256, - sha2, + QueryArtifactTmpl, + FleetArtifacts, + params, ) if err != nil { @@ -52,7 +54,12 @@ func FindArtifactBySha256(ctx context.Context, bulker bulk.Bulk, sha2 string) (* } if len(res.Hits) > 1 { - return nil, ErrConflict + log.Warn(). + Str("ident", ident). + Str("sha2", sha2). + Int("cnt", len(res.Hits)). + Str("used", res.Hits[0].Id). + Msg("Multiple HITS on artifact query. Using the first returned.") } // deserialize diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 06164f821..e760de729 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -9,6 +9,7 @@ const ( FleetActions = ".fleet-actions" FleetActionsResults = ".fleet-actions-results" FleetAgents = ".fleet-agents" + FleetArtifacts = ".fleet-artifacts" FleetEnrollmentAPIKeys = ".fleet-enrollment-api-keys" FleetPolicies = ".fleet-policies" FleetPoliciesLeader = ".fleet-policies-leader" @@ -35,7 +36,8 @@ const ( FieldUpdatedAt = "updated_at" FieldUnenrolledAt = "unenrolled_at" - FieldEncodedSha256 = "encodedSha256" + FieldDecodedSha256 = "decodedSha256" + FieldIdentifier = "identifier" ) // Public constants From c5683431bfa690f3d47c829bf3b1bed631e567aa Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Thu, 18 Mar 2021 12:31:58 -0400 Subject: [PATCH 030/240] Add cache configuration option. 
(cherry picked from commit 4c97d5396750ade202c939e6ceb4aed1aa1152de) --- cmd/fleet/main.go | 30 +++++++++++++++++++++----- cmd/fleet/server_integration_test.go | 2 +- cmd/fleet/server_test.go | 2 +- internal/pkg/cache/cache.go | 15 ++++++++----- internal/pkg/config/cache.go | 20 +++++++++++++++++ internal/pkg/config/config.go | 2 ++ internal/pkg/config/config_test.go | 16 ++++++++++++++ internal/pkg/throttle/throttle_test.go | 4 ++-- 8 files changed, 77 insertions(+), 14 deletions(-) create mode 100644 internal/pkg/config/cache.go diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index ff7b128dc..5779a6b04 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -47,13 +47,23 @@ func installSignalHandler() context.Context { return signal.HandleInterrupt(rootCtx) } +func makeCache(cfg *config.Config) (cache.Cache, error) { + + log.Info(). + Int64("numCounters", cfg.Cache.NumCounters). + Int64("maxCost", cfg.Cache.MaxCost). + Msg("makeCache") + + cacheCfg := cache.Config{ + NumCounters: cfg.Cache.NumCounters, + MaxCost: cfg.Cache.MaxCost, + } + + return cache.New(cacheCfg) +} + func getRunCommand(version string) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { - c, err := cache.New() - if err != nil { - return err - } - cfgObject := cmd.Flags().Lookup("E").Value.(*config.Flag) cliCfg := cfgObject.Config() @@ -74,6 +84,11 @@ func getRunCommand(version string) func(cmd *cobra.Command, args []string) error return err } + c, err := makeCache(cfg) + if err != nil { + return err + } + agent, err := NewAgentMode(cliCfg, os.Stdin, c, version, l) if err != nil { return err @@ -103,6 +118,11 @@ func getRunCommand(version string) func(cmd *cobra.Command, args []string) error return err } + c, err := makeCache(cfg) + if err != nil { + return err + } + srv, err := NewFleetServer(cfg, c, version, status.NewLog()) if err != nil { return err diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 639e077b8..c7e848182 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -63,7 +63,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { return nil, err } - c, err := cache.New() + c, err := cache.New(cache.Config{NumCounters: 100, MaxCost: 100000}) if err != nil { return nil, err } diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index d015b77b8..6f20286d0 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -33,7 +33,7 @@ func TestRunServer(t *testing.T) { cfg.Host = "localhost" cfg.Port = port - c, err := cache.New() + c, err := cache.New(cache.Config{NumCounters: 100, MaxCost: 100000}) require.NoError(t, err) bulker := ftesting.MockBulk{} pim := mock.NewMockIndexMonitor() diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index de77941dd..4057dacc0 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -22,20 +22,25 @@ type Cache struct { cache *ristretto.Cache } +type Config struct { + NumCounters int64 // number of keys to track frequency of + MaxCost int64 // maximum cost of cache in 'cost' units +} + type actionCache struct { actionId string actionType string } // New creates a new cache. 
-func New() (Cache, error) { - cfg := &ristretto.Config{ - NumCounters: 1000000, // number of keys to track frequency of - MaxCost: 100 * 1024 * 1024, // maximum cost of cache (100MB) +func New(cfg Config) (Cache, error) { + rcfg := &ristretto.Config{ + NumCounters: cfg.NumCounters, + MaxCost: cfg.MaxCost, BufferItems: 64, } - cache, err := ristretto.NewCache(cfg) + cache, err := ristretto.NewCache(rcfg) return Cache{cache}, err } diff --git a/internal/pkg/config/cache.go b/internal/pkg/config/cache.go new file mode 100644 index 000000000..b6e1a744a --- /dev/null +++ b/internal/pkg/config/cache.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +const ( + defaultCacheNumCounters = 500000 // 10x times expected count + defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size +) + +type Cache struct { + NumCounters int64 `config:"num_counters"` + MaxCost int64 `config:"max_cost"` +} + +func (c *Cache) InitDefaults() { + c.NumCounters = defaultCacheNumCounters + c.MaxCost = defaultCacheMaxCost +} diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 2f636792b..1c78a9f7a 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -27,6 +27,7 @@ type Config struct { Inputs []Input `config:"inputs"` Logging Logging `config:"logging"` HTTP HTTP `config:"http"` + Cache Cache `config:"cache"` } // InitDefaults initializes the defaults for the configuration. @@ -34,6 +35,7 @@ func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() c.HTTP.InitDefaults() + c.Cache.InitDefaults() } // Validate ensures that the configuration is valid. 
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index d563a4b0b..507aa6cde 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -70,6 +70,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + }, }, }, "fleet-logging": { @@ -122,6 +126,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + }, }, }, "input": { @@ -172,6 +180,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + }, }, }, "input-config": { @@ -222,6 +234,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + }, }, }, "bad-input": { diff --git a/internal/pkg/throttle/throttle_test.go b/internal/pkg/throttle/throttle_test.go index d2228e475..aafbc7f23 100644 --- a/internal/pkg/throttle/throttle_test.go +++ b/internal/pkg/throttle/throttle_test.go @@ -17,7 +17,7 @@ func TestThrottleZero(t *testing.T) { // but still cannot acquire existing that has not timed out throttle := NewThrottle(0) - N := rand.Intn(2048) + 10 + N := rand.Intn(64) + 10 var tokens []*Token for i := 0; i < N; i++ { @@ -106,7 +106,7 @@ func TestThrottleN(t *testing.T) { } // Any subsequent request should fail because at max - try := rand.Intn(1000) + 1 + try := rand.Intn(64) + 1 for i := 0; i < try; i++ { key := strconv.Itoa(N + i) From 6a7c5399b65f9e27502bc76c6a3fa2bfaf8d78e3 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Thu, 18 Mar 2021 14:16:07 -0400 Subject: [PATCH 031/240] Support buildmode=pie on 64 bit platforms. (cherry picked from commit b92712a8451abbcea9c0139d401328d5b35617cc) --- Makefile | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index eed30f208..166a749df 100644 --- a/Makefile +++ b/Makefile @@ -3,7 +3,10 @@ DEFAULT_VERSION=$(shell awk '/const defaultVersion/{print $$NF}' main.go | tr -d TARGET_ARCH_386=x86 TARGET_ARCH_amd64=x86_64 TARGET_ARCH_arm64=arm64 -PLATFORMS ?= darwin/amd64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +BUILDMODE_ARCH_386= ## ASLR either not supported or weak on 32bit machines +BUILDMODE_ARCH_amd64=-buildmode=pie +BUILDMODE_ARCH_arm64=-buildmode=pie +PLATFORMS ?= darwin/amd64 darwin/arm64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 ifeq ($(SNAPSHOT),true) VERSION=${DEFAULT_VERSION}-SNAPSHOT @@ -108,7 +111,8 @@ $(PLATFORM_TARGETS): release-%: $(eval $@_OS := $(firstword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_GO_ARCH := $(lastword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_ARCH := $(TARGET_ARCH_$($@_GO_ARCH))) - GOOS=$($@_OS) GOARCH=$($@_GO_ARCH) go build -ldflags="${LDFLAGS}" -o build/binaries/fleet-server-$(VERSION)-$($@_OS)-$($@_ARCH)/fleet-server . + $(eval $@_BUILDMODE:= $(BUILDMODE_ARCH_$($@_GO_ARCH))) + GOOS=$($@_OS) GOARCH=$($@_GO_ARCH) go build -ldflags="${LDFLAGS}" $($@_BUILDMODE) -o build/binaries/fleet-server-$(VERSION)-$($@_OS)-$($@_ARCH)/fleet-server . 
@$(MAKE) OS=$($@_OS) ARCH=$($@_ARCH) package-target .PHONY: package-target From 0d3c6c4b234cb5006a83a804b63b56756f6e49e8 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Mon, 22 Mar 2021 12:45:43 -0400 Subject: [PATCH 032/240] Move from camelCase to snake_case to reflect change in Kibana. (cherry picked from commit c1249dabf80ee418241472528021ce01d5624671) --- internal/pkg/dl/constants.go | 2 +- internal/pkg/es/mapping.go | 15 +++++++++------ internal/pkg/model/schema.go | 15 +++++++++------ model/schema.json | 16 ++++++++++------ 4 files changed, 29 insertions(+), 19 deletions(-) diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index e760de729..195df2294 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -36,7 +36,7 @@ const ( FieldUpdatedAt = "updated_at" FieldUnenrolledAt = "unenrolled_at" - FieldDecodedSha256 = "decodedSha256" + FieldDecodedSha256 = "decoded_sha256" FieldIdentifier = "identifier" ) diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index 5e27297a7..0aa36732e 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -178,29 +178,32 @@ const ( "enabled" : false, "type": "object" }, - "compressionAlgorithm": { + "compression_algorithm": { "type": "keyword" }, "created": { "type": "date" }, - "decodedSha256": { + "decoded_sha256": { "type": "keyword" }, - "decodedSize": { + "decoded_size": { "type": "integer" }, - "encodedSha256": { + "encoded_sha256": { "type": "keyword" }, - "encodedSize": { + "encoded_size": { "type": "integer" }, - "encryptionAlgorithm": { + "encryption_algorithm": { "type": "keyword" }, "identifier": { "type": "keyword" + }, + "package_name": { + "type": "keyword" } } }` diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 2f870e814..be6449bfd 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -178,28 +178,31 @@ type Artifact struct { Body json.RawMessage `json:"body"` // Name of compression algorithm applied to artifact - CompressionAlgorithm string `json:"compressionAlgorithm"` + CompressionAlgorithm string `json:"compression_algorithm,omitempty"` // Timestamp artifact was created Created string `json:"created"` // SHA256 of artifact before encoding has been applied - DecodedSha256 string `json:"decodedSha256"` + DecodedSha256 string `json:"decoded_sha256,omitempty"` // Size of artifact before encoding has been applied - DecodedSize int64 `json:"decodedSize"` + DecodedSize int64 `json:"decoded_size,omitempty"` // SHA256 of artifact after encoding has been applied - EncodedSha256 string `json:"encodedSha256"` + EncodedSha256 string `json:"encoded_sha256,omitempty"` // Size of artifact after encoding has been applied - EncodedSize int64 `json:"encodedSize"` + EncodedSize int64 `json:"encoded_size,omitempty"` // Name of encryption algorithm applied to artifact - EncryptionAlgorithm string `json:"encryptionAlgorithm,omitempty"` + EncryptionAlgorithm string `json:"encryption_algorithm,omitempty"` // Human readable artifact identifier Identifier string `json:"identifier"` + + // Name of the package that owns this artifact + PackageName string `json:"package_name,omitempty"` } // Body Encoded artifact data diff --git a/model/schema.json b/model/schema.json index c332a0aba..6ecf110c0 100644 --- a/model/schema.json +++ b/model/schema.json @@ -136,27 +136,27 @@ "description": "Human readable artifact identifier", "type": "string" }, - "compressionAlgorithm": { + "compression_algorithm": { "description": "Name of 
compression algorithm applied to artifact", "type": "string" }, - "encryptionAlgorithm": { + "encryption_algorithm": { "description": "Name of encryption algorithm applied to artifact", "type": "string" }, - "encodedSha256": { + "encoded_sha256": { "description": "SHA256 of artifact after encoding has been applied", "type": "string" }, - "encodedSize": { + "encoded_size": { "description": "Size of artifact after encoding has been applied", "type": "integer" }, - "decodedSha256": { + "decoded_sha256": { "description": "SHA256 of artifact before encoding has been applied", "type": "string" }, - "decodedSize": { + "decoded_size": { "description": "Size of artifact before encoding has been applied", "type": "integer" }, @@ -169,6 +169,10 @@ "description": "Encoded artifact data", "type": "object", "format": "raw" + }, + "package_name": { + "description": "Name of the package that owns this artifact", + "type": "string" } }, "required": [ From 6babdb2aa214cd540d18fac65c0ad5a7614b74b5 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Tue, 9 Mar 2021 18:54:33 -0500 Subject: [PATCH 033/240] Update Event payload type. (#128) (cherry picked from commit 7c4449bae782dee013ad21ff63599d088c8147bb) --- cmd/fleet/schema.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 9b8b9cdb2..926aabaf0 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -132,7 +132,7 @@ type Event struct { StreamId string `json:"stream_id"` Timestamp string `json:"timestamp"` Message string `json:"message"` - Payload string `json:"payload,omitempty"` + Payload json.RawMessage `json:"payload,omitempty"` StartedAt string `json:"started_at"` CompletedAt string `json:"completed_at"` ActionData json.RawMessage `json:"action_data,omitempty"` From b2c5da59c4ed4041ce310817fa831be27dcc0acd Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Tue, 23 Mar 2021 13:51:53 +0000 Subject: [PATCH 034/240] Remove darwin/arm64 support. Remove ASLR support for darwin. Both fail on the linux build machines. This is a temporary change until we can get the cross compile resolved. 
(cherry picked from commit 1d5c86dfcb9e23a4afca9a177067eb77045342bc) --- Makefile | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/Makefile b/Makefile index 166a749df..19b61e4a3 100644 --- a/Makefile +++ b/Makefile @@ -3,10 +3,11 @@ DEFAULT_VERSION=$(shell awk '/const defaultVersion/{print $$NF}' main.go | tr -d TARGET_ARCH_386=x86 TARGET_ARCH_amd64=x86_64 TARGET_ARCH_arm64=arm64 -BUILDMODE_ARCH_386= ## ASLR either not supported or weak on 32bit machines -BUILDMODE_ARCH_amd64=-buildmode=pie -BUILDMODE_ARCH_arm64=-buildmode=pie -PLATFORMS ?= darwin/amd64 darwin/arm64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +PLATFORMS ?= darwin/amd64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +BUILDMODE_linux_amd64=-buildmode=pie +BUILDMODE_linux_arm64=-buildmode=pie +BUILDMODE_windows_386=-buildmode=pie +BUILDMODE_windows_amd64=-buildmode=pie ifeq ($(SNAPSHOT),true) VERSION=${DEFAULT_VERSION}-SNAPSHOT @@ -111,7 +112,7 @@ $(PLATFORM_TARGETS): release-%: $(eval $@_OS := $(firstword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_GO_ARCH := $(lastword $(subst /, ,$(lastword $(subst release-, ,$@))))) $(eval $@_ARCH := $(TARGET_ARCH_$($@_GO_ARCH))) - $(eval $@_BUILDMODE:= $(BUILDMODE_ARCH_$($@_GO_ARCH))) + $(eval $@_BUILDMODE:= $(BUILDMODE_$($@_OS)_$($@_GO_ARCH))) GOOS=$($@_OS) GOARCH=$($@_GO_ARCH) go build -ldflags="${LDFLAGS}" $($@_BUILDMODE) -o build/binaries/fleet-server-$(VERSION)-$($@_OS)-$($@_ARCH)/fleet-server . @$(MAKE) OS=$($@_OS) ARCH=$($@_ARCH) package-target From 9d8ffa15ffbf49f4e617a322dbbbd180373cdc32 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Tue, 23 Mar 2021 11:01:35 -0400 Subject: [PATCH 035/240] Upgrade go compiler to 1.16.2 to support darwin/arm64 and buildmode=pie on darwin. 
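For reference, a simplified per-platform release build then looks roughly like
(abridged from the Makefile target; ldflags and output path omitted):

    GOOS=darwin GOARCH=arm64 go build -buildmode=pie -o fleet-server .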
--- .go-version | 2 +- Makefile | 4 +++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/.go-version b/.go-version index 98e863cdf..4a02d2c31 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.15.8 +1.16.2 diff --git a/Makefile b/Makefile index 19b61e4a3..a584f92eb 100644 --- a/Makefile +++ b/Makefile @@ -3,11 +3,13 @@ DEFAULT_VERSION=$(shell awk '/const defaultVersion/{print $$NF}' main.go | tr -d TARGET_ARCH_386=x86 TARGET_ARCH_amd64=x86_64 TARGET_ARCH_arm64=arm64 -PLATFORMS ?= darwin/amd64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 +PLATFORMS ?= darwin/amd64 darwin/arm64 linux/386 linux/amd64 linux/arm64 windows/386 windows/amd64 BUILDMODE_linux_amd64=-buildmode=pie BUILDMODE_linux_arm64=-buildmode=pie BUILDMODE_windows_386=-buildmode=pie BUILDMODE_windows_amd64=-buildmode=pie +BUILDMODE_darwin_amd64=-buildmode=pie +BUILDMODE_darwin_amd64=-buildmode=pie ifeq ($(SNAPSHOT),true) VERSION=${DEFAULT_VERSION}-SNAPSHOT From b27e722ef8ddb5f29775feb8fb3aa1fb11b32b07 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Tue, 23 Mar 2021 15:49:20 +0000 Subject: [PATCH 036/240] Fix incorrect buildmode for arm64/darwin --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a584f92eb..3601e568e 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ BUILDMODE_linux_arm64=-buildmode=pie BUILDMODE_windows_386=-buildmode=pie BUILDMODE_windows_amd64=-buildmode=pie BUILDMODE_darwin_amd64=-buildmode=pie -BUILDMODE_darwin_amd64=-buildmode=pie +BUILDMODE_darwin_arm64=-buildmode=pie ifeq ($(SNAPSHOT),true) VERSION=${DEFAULT_VERSION}-SNAPSHOT From 4d27208e50907c9c94101cb8ee8e0937d4e5f77c Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 23 Mar 2021 15:11:28 -0400 Subject: [PATCH 037/240] Fix configuration for cache and server profiler off by default (#162) (#165) * Adjust cache config to be under the input. Disable server profiler by default. * Fix tests and fmt. (cherry picked from commit 68e75a70e88ff9fe1c6f454afdcc3b142202c0b0) Co-authored-by: Blake Rouse --- cmd/fleet/main.go | 19 ++++++----- fleet-server.yml | 8 +++-- internal/pkg/config/config.go | 2 -- internal/pkg/config/config_test.go | 52 ++++++++++++++++++------------ internal/pkg/config/input.go | 6 +++- 5 files changed, 54 insertions(+), 33 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 5779a6b04..c029b4b1a 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -50,13 +50,13 @@ func installSignalHandler() context.Context { func makeCache(cfg *config.Config) (cache.Cache, error) { log.Info(). - Int64("numCounters", cfg.Cache.NumCounters). - Int64("maxCost", cfg.Cache.MaxCost). + Int64("numCounters", cfg.Inputs[0].Cache.NumCounters). + Int64("maxCost", cfg.Inputs[0].Cache.MaxCost). 
Msg("makeCache") cacheCfg := cache.Config{ - NumCounters: cfg.Cache.NumCounters, - MaxCost: cfg.Cache.MaxCost, + NumCounters: cfg.Inputs[0].Cache.NumCounters, + MaxCost: cfg.Inputs[0].Cache.MaxCost, } return cache.New(cacheCfg) @@ -422,11 +422,14 @@ func (f *FleetServer) Run(ctx context.Context) error { } // Restart profiler - if curCfg == nil || curCfg.Inputs[0].Server.Profile.Bind != newCfg.Inputs[0].Server.Profile.Bind { + if curCfg == nil || curCfg.Inputs[0].Server.Profile.Enabled != newCfg.Inputs[0].Server.Profile.Enabled || curCfg.Inputs[0].Server.Profile.Bind != newCfg.Inputs[0].Server.Profile.Bind { stop(proCancel, proEg) - proEg, proCancel = start(ctx, func(ctx context.Context) error { - return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profile.Bind) - }, ech) + proEg, proCancel = nil, nil + if newCfg.Inputs[0].Server.Profile.Enabled { + proEg, proCancel = start(ctx, func(ctx context.Context) error { + return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profile.Bind) + }, ech) + } } // Restart server diff --git a/fleet-server.yml b/fleet-server.yml index f27763092..830d3cf97 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -12,9 +12,13 @@ fleet: # Input config provided by the Elastic Agent for the server #inputs: -# - type: -# policy: +# - type: fleet-server # server: +# host: localhost +# port: 8220 +# cache: +# num_counters: 500000 # 10x times expected count +# max_cost: 50 * 1024 * 1024 # 50MiB cache size logging: to_stderr: true diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 1c78a9f7a..2f636792b 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -27,7 +27,6 @@ type Config struct { Inputs []Input `config:"inputs"` Logging Logging `config:"logging"` HTTP HTTP `config:"http"` - Cache Cache `config:"cache"` } // InitDefaults initializes the defaults for the configuration. @@ -35,7 +34,6 @@ func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() c.HTTP.InitDefaults() - c.Cache.InitDefaults() } // Validate ensures that the configuration is valid. 
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 507aa6cde..fe9ec7215 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -56,7 +56,14 @@ func TestConfig(t *testing.T) { MaxEnrollPending: 64, RateLimitBurst: 1024, RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Profile: ServerProfile{ + Enabled: false, + Bind: "localhost:6060", + }, + }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, }, }, }, @@ -70,10 +77,6 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, }, }, "fleet-logging": { @@ -112,7 +115,14 @@ func TestConfig(t *testing.T) { MaxEnrollPending: 64, RateLimitBurst: 1024, RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Profile: ServerProfile{ + Enabled: false, + Bind: "localhost:6060", + }, + }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, }, }, }, @@ -126,10 +136,6 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, }, }, "input": { @@ -166,7 +172,14 @@ func TestConfig(t *testing.T) { MaxEnrollPending: 64, RateLimitBurst: 1024, RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Profile: ServerProfile{ + Enabled: false, + Bind: "localhost:6060", + }, + }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, }, }, }, @@ -180,10 +193,6 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, }, }, "input-config": { @@ -220,7 +229,14 @@ func TestConfig(t *testing.T) { MaxEnrollPending: 64, RateLimitBurst: 1024, RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{Bind: "localhost:6060"}, + Profile: ServerProfile{ + Enabled: false, + Bind: "localhost:6060", + }, + }, + Cache: Cache{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, }, }, }, @@ -234,10 +250,6 @@ func TestConfig(t *testing.T) { Host: kDefaultHTTPHost, Port: kDefaultHTTPPort, }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, }, }, "bad-input": { diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 6588bfdb3..f9b2577b2 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -34,11 +34,13 @@ func (c *ServerTimeouts) InitDefaults() { // ServerProfile is the configuration for profiling the server. type ServerProfile struct { - Bind string `config:"bind"` + Enabled bool `config:"enabled"` + Bind string `config:"bind"` } // InitDefaults initializes the defaults for the configuration. func (c *ServerProfile) InitDefaults() { + c.Enabled = false c.Bind = "localhost:6060" } @@ -89,12 +91,14 @@ type Input struct { Type string `config:"type"` Policy Policy `config:"policy"` Server Server `config:"server"` + Cache Cache `config:"cache"` } // InitDefaults initializes the defaults for the configuration. func (c *Input) InitDefaults() { c.Type = "fleet-server" c.Server.InitDefaults() + c.Cache.InitDefaults() } // Validate ensures that the configuration is valid. 
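With the change above, the cache block is read from each input rather than the top level, and the profiler only starts when explicitly enabled. A minimal fleet-server.yml input section reflecting the new layout might look like the sketch below; the key names follow the config tags shown in the diff, while the port and sizing values are illustrative rather than defaults.

inputs:
  - type: fleet-server
    server:
      host: localhost
      port: 8220
      profile:
        enabled: true        # profiler is off by default after this change
        bind: localhost:6060
    cache:
      num_counters: 500000   # ~10x the expected number of cached items
      max_cost: 52428800     # ~50MiB cache size
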
From f1edcf74fc87eca2a23a2c3611b3a69b9226b1df Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 29 Mar 2021 16:23:04 -0400 Subject: [PATCH 038/240] Fix the handling of the upgrade ack. (#177) (#179) (cherry picked from commit 2a69fe47abf2b20f1186ad2db77804aab2efcf01) Co-authored-by: Blake Rouse --- cmd/fleet/handleAck.go | 38 ++++++++++++++++++++++++++++++------ cmd/fleet/schema.go | 1 + internal/pkg/dl/constants.go | 8 +++++--- 3 files changed, 38 insertions(+), 9 deletions(-) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 8ce7e4b7e..6a1aaeb7f 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -42,7 +42,6 @@ func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httproute } } -// TODO: Handle UPGRADE func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) error { agent, err := authAgent(r, id, bulker, c) if err != nil { @@ -65,7 +64,6 @@ func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk. return err } - // TODO: flesh this out resp := AckResponse{"acks"} data, err := json.Marshal(&resp) @@ -121,8 +119,14 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b return err } - if ev.Error == "" && action.Type == TypeUnenroll { - unenroll = true + if ev.Error == "" { + if action.Type == TypeUnenroll { + unenroll = true + } else if action.Type == TypeUpgrade { + if err := _handleUpgrade(ctx, bulker, agent); err != nil { + return err + } + } } } @@ -138,8 +142,6 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b } } - // TODO: handle UPGRADE - return nil } @@ -223,6 +225,30 @@ func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) return bulker.MUpdate(ctx, updates, bulk.WithRefresh()) } +func _handleUpgrade(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) error { + updates := make([]bulk.BulkOp, 0, 1) + now := time.Now().UTC().Format(time.RFC3339) + fields := map[string]interface{}{ + dl.FieldUpgradedAt: now, + dl.FieldUpgradeStartedAt: nil, + } + + source, err := json.Marshal(map[string]interface{}{ + "doc": fields, + }) + if err != nil { + return err + } + + updates = append(updates, bulk.BulkOp{ + Id: agent.Id, + Body: source, + Index: dl.FleetAgents, + }) + + return bulker.MUpdate(ctx, updates, bulk.WithRefresh()) +} + func _getAPIKeyIDs(agent *model.Agent) []string { keys := make([]string, 0, 1) if agent.AccessApiKeyId != "" { diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 926aabaf0..4262f5d51 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -15,6 +15,7 @@ const ( const ( TypePolicyChange = "POLICY_CHANGE" TypeUnenroll = "UNENROLL" + TypeUpgrade = "UPGRADE" ) const ( diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 195df2294..8be370690 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -32,9 +32,11 @@ const ( FieldPolicyRevisionIdx = "policy_revision_idx" FieldPolicyCoordinatorIdx = "policy_coordinator_idx" - FieldActive = "active" - FieldUpdatedAt = "updated_at" - FieldUnenrolledAt = "unenrolled_at" + FieldActive = "active" + FieldUpdatedAt = "updated_at" + FieldUnenrolledAt = "unenrolled_at" + FieldUpgradedAt = "upgraded_at" + FieldUpgradeStartedAt = "upgrade_started_at" FieldDecodedSha256 = "decoded_sha256" FieldIdentifier = "identifier" From 0b802d653406ee1dbc6b22f063a3ef06993371d8 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" 
<37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 29 Mar 2021 17:51:36 -0400 Subject: [PATCH 039/240] Remove the left over systemd and rpm build items. (#170) (#181) (cherry picked from commit b4e8ba965f1ca931a317484841b9ec8630f5b116) Co-authored-by: Blake Rouse --- dev-tools/rpm/nfpm.yaml | 24 ------------------------ systemd/fleet.service | 13 ------------- 2 files changed, 37 deletions(-) delete mode 100644 dev-tools/rpm/nfpm.yaml delete mode 100644 systemd/fleet.service diff --git a/dev-tools/rpm/nfpm.yaml b/dev-tools/rpm/nfpm.yaml deleted file mode 100644 index dc9fb62eb..000000000 --- a/dev-tools/rpm/nfpm.yaml +++ /dev/null @@ -1,24 +0,0 @@ -# nfpm example config file -# env APP_RELEASE=11.1 APP_VERSION=1.2.3 ../nfpm/nfpm -f ./build/rpm/nfpm.yaml pkg --target evmon.rpm -name: "fleet" -arch: "x86_64" -platform: "linux" -version: "v${APP_VERSION}" -release: "${APP_RELEASE}" -section: "default" -maintainer: "devops@endgame.com" -description: Elastic Fleet -vendor: "Elastic NV" -homepage: "http://www.elastic.co/" -contents: - - src: ./bin/fleet - dst: /usr/bin/fleet - - - src: ./systemd/fleet.service - dst: /usr/lib/systemd/system/fleet.service - type: config - - - src: ./fleet-server.yml - dst: /usr/share/fleet/fleet-server.yml - type: config - diff --git a/systemd/fleet.service b/systemd/fleet.service deleted file mode 100644 index 9c64765f7..000000000 --- a/systemd/fleet.service +++ /dev/null @@ -1,13 +0,0 @@ -[Unit] -Description=Elastic Fleet Daemon -After=network.target -Requires=network.target - -[Service] -ExecStart=/usr/bin/fleet-server -c /usr/share/fleet/fleet-server.yml -Type=simple -Restart=always -RestartSec=3 -StartLimitInterval=40 -StartLimitBurst=10 -LimitNOFILE=999999 From b84dfc627232f9f59736c726ef2625c35c122393 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Mar 2021 09:19:23 +0200 Subject: [PATCH 040/240] Fix issue with rolling out policies from the coordinator. (#180) (#183) * Fix issue with rolling out policies from the coordinator. * Simplify branch logic. (cherry picked from commit 548ef5e9a690c622ac690e586a6f865fed01f82f) Co-authored-by: Blake Rouse --- internal/pkg/coordinator/v0.go | 38 ++++++++++++++++++++-------------- internal/pkg/dl/policies.go | 2 +- internal/pkg/dsl/term.go | 6 ++++-- 3 files changed, 28 insertions(+), 18 deletions(-) diff --git a/internal/pkg/coordinator/v0.go b/internal/pkg/coordinator/v0.go index 57ecabeea..5c8da5837 100644 --- a/internal/pkg/coordinator/v0.go +++ b/internal/pkg/coordinator/v0.go @@ -28,7 +28,7 @@ func NewCoordinatorZero(policy model.Policy) (Coordinator, error) { return &coordinatorZeroT{ log: log.With().Str("ctx", "coordinator v0").Str("policyId", policy.PolicyId).Logger(), policy: policy, - in: make(chan model.Policy, 1), + in: make(chan model.Policy), out: make(chan model.Policy), }, nil } @@ -40,26 +40,19 @@ func (c *coordinatorZeroT) Name() string { // Run runs the coordinator for the policy. 
func (c *coordinatorZeroT) Run(ctx context.Context) error { - c.in <- c.policy + err := c.updatePolicy(c.policy) + if err != nil { + c.log.Err(err).Msg("failed to handle policy") + } + for { select { case p := <-c.in: - newData, err := c.handlePolicy(p.Data) + err = c.updatePolicy(p) if err != nil { c.log.Err(err).Msg("failed to handle policy") continue } - if p.CoordinatorIdx == 0 { - p.CoordinatorIdx = 1 - p.Data = newData - c.policy = p - c.out <- p - } else if string(newData) != string(p.Data) { - p.CoordinatorIdx += 1 - p.Data = newData - c.policy = p - c.out <- p - } case <-ctx.Done(): return ctx.Err() } @@ -77,7 +70,22 @@ func (c *coordinatorZeroT) Output() <-chan model.Policy { return c.out } -// handlePolicy handles the new policy. +// updatePolicy performs the working of incrementing the coordinator idx. +func (c *coordinatorZeroT) updatePolicy(p model.Policy) error { + newData, err := c.handlePolicy(p.Data) + if err != nil { + return err + } + if p.CoordinatorIdx == 0 || string(newData) != string(p.Data) { + p.CoordinatorIdx += 1 + p.Data = newData + c.policy = p + c.out <- p + } + return nil +} + +// handlePolicy performs the actual work of coordination. // // Does nothing at the moment. func (c *coordinatorZeroT) handlePolicy(data json.RawMessage) (json.RawMessage, error) { diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index f5277e731..78817c87b 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -27,7 +27,7 @@ func prepareQueryLatestPolicies() []byte { root := dsl.NewRoot() root.Size(0) policyId := root.Aggs().Agg(FieldPolicyId) - policyId.Terms("field", FieldPolicyId, nil) + policyId.Terms("field", FieldPolicyId, nil).Size(10000) revisionIdx := policyId.Aggs().Agg(FieldRevisionIdx).TopHits() revisionIdx.Size(1) rSort := revisionIdx.Sort() diff --git a/internal/pkg/dsl/term.go b/internal/pkg/dsl/term.go index 8a13b80e5..7da88b08a 100644 --- a/internal/pkg/dsl/term.go +++ b/internal/pkg/dsl/term.go @@ -4,7 +4,7 @@ package dsl -func (n *Node) Term(field string, value interface{}, boost *float64) { +func (n *Node) Term(field string, value interface{}, boost *float64) *Node { childNode := n.appendOrSetChildNode(kKeywordTerm) leaf := value @@ -22,9 +22,10 @@ func (n *Node) Term(field string, value interface{}, boost *float64) { childNode.nodeMap = nodeMapT{field: &Node{ leaf: leaf, }} + return childNode } -func (n *Node) Terms(field string, value interface{}, boost *float64) { +func (n *Node) Terms(field string, value interface{}, boost *float64) *Node { childNode := n.appendOrSetChildNode(kKeywordTerms) childNode.nodeMap = nodeMapT{ @@ -34,4 +35,5 @@ func (n *Node) Terms(field string, value interface{}, boost *float64) { if boost != nil { childNode.nodeMap[kKeywordBoost] = &Node{leaf: *boost} } + return childNode } From 338f15b0144aa9d09217bd9acab7055740b45d00 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Tue, 30 Mar 2021 14:22:43 -0400 Subject: [PATCH 041/240] Increase the monitor throughput: increase the default fetch size, make it configurable. (#171) (#188) * Increase the monitor throughput: increase the default fetch size, make it configurable. 
* Address the code review feedback (cherry picked from commit e475479249adb1027df72aa017e38e7cfbc01f58) --- cmd/fleet/main.go | 5 ++--- internal/pkg/config/config_test.go | 12 ++++++++++++ internal/pkg/config/input.go | 10 ++++++---- internal/pkg/config/monitor.go | 17 +++++++++++++++++ internal/pkg/monitor/monitor.go | 20 ++++++++++++++++++-- 5 files changed, 55 insertions(+), 9 deletions(-) create mode 100644 internal/pkg/config/monitor.go diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index c029b4b1a..0d63aa968 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -496,7 +496,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er g, ctx := errgroup.WithContext(ctx) // Coordinator policy monitor - pim, err := monitor.New(dl.FleetPolicies, es) + pim, err := monitor.New(dl.FleetPolicies, es, monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize)) if err != nil { return err } @@ -518,8 +518,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er var ad *action.Dispatcher var tr *action.TokenResolver - // Behind the feature flag - am, err = monitor.NewSimple(dl.FleetActions, es, monitor.WithExpiration(true)) + am, err = monitor.NewSimple(dl.FleetActions, es, monitor.WithExpiration(true), monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize)) if err != nil { return err } diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index fe9ec7215..c953a6a77 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -65,6 +65,9 @@ func TestConfig(t *testing.T) { NumCounters: defaultCacheNumCounters, MaxCost: defaultCacheMaxCost, }, + Monitor: Monitor{ + FetchSize: defaultFetchSize, + }, }, }, Logging: Logging{ @@ -124,6 +127,9 @@ func TestConfig(t *testing.T) { NumCounters: defaultCacheNumCounters, MaxCost: defaultCacheMaxCost, }, + Monitor: Monitor{ + FetchSize: defaultFetchSize, + }, }, }, Logging: Logging{ @@ -181,6 +187,9 @@ func TestConfig(t *testing.T) { NumCounters: defaultCacheNumCounters, MaxCost: defaultCacheMaxCost, }, + Monitor: Monitor{ + FetchSize: defaultFetchSize, + }, }, }, Logging: Logging{ @@ -238,6 +247,9 @@ func TestConfig(t *testing.T) { NumCounters: defaultCacheNumCounters, MaxCost: defaultCacheMaxCost, }, + Monitor: Monitor{ + FetchSize: defaultFetchSize, + }, }, }, Logging: Logging{ diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index f9b2577b2..e24047fcd 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -88,10 +88,11 @@ func (c *Server) BindAddress() string { // Input is the input defined by Agent to run Fleet Server. type Input struct { - Type string `config:"type"` - Policy Policy `config:"policy"` - Server Server `config:"server"` - Cache Cache `config:"cache"` + Type string `config:"type"` + Policy Policy `config:"policy"` + Server Server `config:"server"` + Cache Cache `config:"cache"` + Monitor Monitor `config:"monitor"` } // InitDefaults initializes the defaults for the configuration. @@ -99,6 +100,7 @@ func (c *Input) InitDefaults() { c.Type = "fleet-server" c.Server.InitDefaults() c.Cache.InitDefaults() + c.Monitor.InitDefaults() } // Validate ensures that the configuration is valid. diff --git a/internal/pkg/config/monitor.go b/internal/pkg/config/monitor.go new file mode 100644 index 000000000..1d3f8a31d --- /dev/null +++ b/internal/pkg/config/monitor.go @@ -0,0 +1,17 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +const ( + defaultFetchSize = 1000 +) + +type Monitor struct { + FetchSize int `config:"fetch_size"` +} + +func (m *Monitor) InitDefaults() { + m.FetchSize = defaultFetchSize +} diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index a9c726899..8d4812bf8 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -25,9 +25,16 @@ const ( defaultCheckInterval = 1 * time.Second // check every second for the new action defaultSeqNo = int64(-1) // the _seq_no in elasticsearch start with 0 defaultWithExpiration = false - defaultFetchSize = 10 - tightLoopCheckInterval = 50 * time.Millisecond // when we get a full page (fetchSize) of documents, use this interval to repeatedly poll for more records + // Making the default fetch size larger, in order to increase the throughput of the monitor. + // This is configurable as well, so can be adjusted based on the memory size of the container if needed. + // Seems like the usage of smaller actions, one or few agents in the action document would be more prevalent in the future. + // For example, as of now the current size of osquery action JSON document for 1000 agents is 40KB. + // Assuiming the worst case scenario of 1000 of document fetched, we are looking at 50MB slice. + // One action can be split up into multiple documents up to the 1000 agents per action if needed. + defaultFetchSize = 1000 + + tightLoopCheckInterval = 10 * time.Millisecond // when we get a full page (fetchSize) of documents, use this interval to repeatedly poll for more records ) const ( @@ -130,6 +137,15 @@ func NewSimple(index string, cli *elasticsearch.Client, opts ...Option) (SimpleM return m, nil } +// WithCheckInterval sets a periodic check interval +func WithFetchSize(fetchSize int) Option { + return func(m SimpleMonitor) { + if fetchSize > 0 { + m.(*simpleMonitorT).fetchSize = fetchSize + } + } +} + // WithCheckInterval sets a periodic check interval func WithCheckInterval(interval time.Duration) Option { return func(m SimpleMonitor) { From 066cbb96cfbb115d999c2ac78134098d599509c6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Mar 2021 18:28:42 +0000 Subject: [PATCH 042/240] Increase the monitor throughput: increase the default fetch size, make it configurable. (#171) (#189) * Increase the monitor throughput: increase the default fetch size, make it configurable. 
* Address the code review feedback (cherry picked from commit e475479249adb1027df72aa017e38e7cfbc01f58) Co-authored-by: Aleksandr Maus Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> From f6a9db9e78f4543fbd63580d1e77690935d3a587 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Tue, 30 Mar 2021 14:33:59 -0400 Subject: [PATCH 043/240] Change the agent.action_seq_no storage format from number to an array (#174) (#190) (cherry picked from commit fbca5f9482925bf830d87f5c3e6244f34bc9fd1e) Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- NOTICE.txt | 4 ++-- cmd/fleet/bulkCheckin.go | 7 ++++--- cmd/fleet/handleCheckin.go | 9 +++++---- cmd/fleet/handleEnroll.go | 3 ++- go.mod | 2 +- go.sum | 4 ++-- internal/pkg/action/dispatcher.go | 5 +++-- internal/pkg/dl/constants.go | 9 +++------ internal/pkg/model/schema.go | 2 +- internal/pkg/sqn/sqn.go | 33 +++++++++++++++++++++++++++++++ model/schema.json | 5 ++++- 11 files changed, 60 insertions(+), 23 deletions(-) create mode 100644 internal/pkg/sqn/sqn.go diff --git a/NOTICE.txt b/NOTICE.txt index 03f71096c..db2e72769 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -11,11 +11,11 @@ Third party libraries used by the Elastic Beats project: -------------------------------------------------------------------------------- Dependency : github.com/aleksmaus/generate -Version: v0.0.0-20201213151810-c5bc68a6a42f +Version: v0.0.0-20210326194607-c630e07a2742 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/aleksmaus/generate@v0.0.0-20201213151810-c5bc68a6a42f/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/aleksmaus/generate@v0.0.0-20210326194607-c630e07a2742/LICENSE.txt: MIT License diff --git a/cmd/fleet/bulkCheckin.go b/cmd/fleet/bulkCheckin.go index bb508c917..eeace0549 100644 --- a/cmd/fleet/bulkCheckin.go +++ b/cmd/fleet/bulkCheckin.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/rs/zerolog/log" ) @@ -22,7 +23,7 @@ const kBulkCheckinFlushInterval = 10 * time.Second type PendingData struct { fields Fields - seqNo int64 + seqNo sqn.SeqNo } type BulkCheckin struct { @@ -38,7 +39,7 @@ func NewBulkCheckin(bulker bulk.Bulk) *BulkCheckin { } } -func (bc *BulkCheckin) CheckIn(id string, fields Fields, seqno int64) error { +func (bc *BulkCheckin) CheckIn(id string, fields Fields, seqno sqn.SeqNo) error { if fields == nil { fields = make(Fields) @@ -93,7 +94,7 @@ func (bc *BulkCheckin) flush(ctx context.Context) error { for id, pendingData := range pending { doc := pendingData.fields doc[dl.FieldUpdatedAt] = time.Now().UTC().Format(time.RFC3339) - if pendingData.seqNo >= 0 { + if pendingData.seqNo.IsSet() { doc[dl.FieldActionSeqNo] = pendingData.seqNo } diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index ae4b764d6..bd6132e40 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" @@ -200,7 +201,7 @@ func (ct *CheckinT) 
_handleCheckin(w http.ResponseWriter, r *http.Request, id st } // Resolve AckToken from request, fallback on the agent record -func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent *model.Agent) (seqno int64, err error) { +func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent *model.Agent) (seqno sqn.SeqNo, err error) { // Resolve AckToken from request, fallback on the agent record ackToken := req.AckToken seqno = agent.ActionSeqNo @@ -216,16 +217,16 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent return } } - seqno = sn + seqno = []int64{sn} } return seqno, nil } -func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno int64, agentId string) ([]model.Action, error) { +func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno sqn.SeqNo, agentId string) ([]model.Action, error) { now := time.Now().UTC().Format(time.RFC3339) return dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ - dl.FieldSeqNo: seqno, + dl.FieldSeqNo: seqno.Get(0), dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint(), dl.FieldExpiration: now, dl.FieldAgents: []string{agentId}, diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index fc0e44ec9..c733e409a 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -19,6 +19,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/elastic/go-elasticsearch/v8" "github.com/gofrs/uuid" @@ -206,7 +207,7 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq AccessApiKeyId: accessApiKey.Id, DefaultApiKeyId: defaultOutputApiKey.Id, DefaultApiKey: defaultOutputApiKey.Agent(), - ActionSeqNo: dl.UndefinedSeqNo, + ActionSeqNo: []int64{sqn.UndefinedSeqNo}, } err = createFleetAgent(ctx, bulker, agentId, agentData) diff --git a/go.mod b/go.mod index 73269b7c3..f419e1008 100644 --- a/go.mod +++ b/go.mod @@ -3,7 +3,7 @@ module github.com/elastic/fleet-server/v7 go 1.15 require ( - github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f + github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 github.com/dgraph-io/ristretto v0.0.3 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a diff --git a/go.sum b/go.sum index d98c929dc..6129ee24e 100644 --- a/go.sum +++ b/go.sum @@ -106,8 +106,8 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f h1:wr9LrxkE1Ai416C/mis1gEDsXrbERHGufCmf7xuYwI4= -github.com/aleksmaus/generate v0.0.0-20201213151810-c5bc68a6a42f/go.mod h1:lvlu2Ij1bLmxB8RUWyw5IQ4/JcLX60eYhLiBmvImnhk= +github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 h1:lDBhj+4eBCS9tNiJLXrNbvwO5xwkn2/kjvy+tO+PWlI= +github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742/go.mod h1:lvlu2Ij1bLmxB8RUWyw5IQ4/JcLX60eYhLiBmvImnhk= 
github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI= github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ= diff --git a/internal/pkg/action/dispatcher.go b/internal/pkg/action/dispatcher.go index 10bd53a31..d23fd16b6 100644 --- a/internal/pkg/action/dispatcher.go +++ b/internal/pkg/action/dispatcher.go @@ -11,13 +11,14 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/rs/zerolog/log" ) type Sub struct { agentId string - seqNo int64 + seqNo sqn.SeqNo ch chan []model.Action } @@ -50,7 +51,7 @@ func (d *Dispatcher) Run(ctx context.Context) (err error) { } } -func (d *Dispatcher) Subscribe(agentId string, seqNo int64) *Sub { +func (d *Dispatcher) Subscribe(agentId string, seqNo sqn.SeqNo) *Sub { cbCh := make(chan []model.Action, 1) sub := Sub{ diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 8be370690..5de4c2961 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -4,6 +4,8 @@ package dl +import "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + // Indices names const ( FleetActions = ".fleet-actions" @@ -42,13 +44,8 @@ const ( FieldIdentifier = "identifier" ) -// Public constants -const ( - UndefinedSeqNo = -1 -) - // Private constants const ( - defaultSeqNo = UndefinedSeqNo + defaultSeqNo = sqn.UndefinedSeqNo seqNoPrimaryTerm = "seq_no_primary_term" ) diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index be6449bfd..485892a31 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -96,7 +96,7 @@ type Agent struct { AccessApiKeyId string `json:"access_api_key_id,omitempty"` // The last acknowledged action sequence number for the Elastic Agent - ActionSeqNo int64 `json:"action_seq_no,omitempty"` + ActionSeqNo []int64 `json:"action_seq_no,omitempty"` // Active flag Active bool `json:"active"` diff --git a/internal/pkg/sqn/sqn.go b/internal/pkg/sqn/sqn.go new file mode 100644 index 000000000..a2d2def60 --- /dev/null +++ b/internal/pkg/sqn/sqn.go @@ -0,0 +1,33 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package sqn + +import ( + "fmt" + "strings" +) + +const UndefinedSeqNo = -1 + +// Abstracts the array of document seq numbers +type SeqNo []int64 + +func (s SeqNo) String() string { + if len(s) == 0 { + return "" + } + return strings.Join(strings.Fields(strings.Trim(fmt.Sprint([]int64(s)), "[]")), ",") +} + +func (s SeqNo) IsSet() bool { + return len(s) > 0 && s[0] >= 0 +} + +func (s SeqNo) Get(idx int) int64 { + if idx < len(s) { + return s[idx] + } + return UndefinedSeqNo +} diff --git a/model/schema.json b/model/schema.json index 6ecf110c0..8386463a5 100644 --- a/model/schema.json +++ b/model/schema.json @@ -430,7 +430,10 @@ }, "action_seq_no": { "description": "The last acknowledged action sequence number for the Elastic Agent", - "type": "integer" + "type": "array", + "items": { + "type": "integer" + } } }, "required": [ From 23bb5b316d07e5e57f92f241e73aaeeee5f01c34 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Mar 2021 18:39:06 +0000 Subject: [PATCH 044/240] Change the agent.action_seq_no storage format from number to an array (#174) (#191) (cherry picked from commit fbca5f9482925bf830d87f5c3e6244f34bc9fd1e) Co-authored-by: Aleksandr Maus Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> From 2729c2ece8b772f9c6ad56272e41f3ce3290bfbf Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Mar 2021 18:48:27 +0000 Subject: [PATCH 045/240] Indexing permissions as part of the Elastic Agent policy (#187) (#192) * Indexing permissions as part of the Elastic Agent policy * Delay the output key generation. Now it is dirven by the policy monitor policy updates. (cherry picked from commit a743bad98b732a647fb0ae8098c429abcc2f8943) # Conflicts: # cmd/fleet/handleCheckin.go # cmd/fleet/handleEnroll.go Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 67 +++++- cmd/fleet/handleEnroll.go | 32 +-- cmd/fleet/schema.go | 20 -- fleet-server.yml | 2 +- internal/pkg/dl/policies.go | 25 +- internal/pkg/es/mapping.go | 3 + internal/pkg/model/schema.go | 3 + internal/pkg/policy/output_permissions.go | 99 ++++++++ .../pkg/policy/output_permissions_test.go | 218 ++++++++++++++++++ internal/pkg/smap/smap.go | 74 ++++++ model/schema.json | 4 + 11 files changed, 485 insertions(+), 62 deletions(-) create mode 100644 internal/pkg/policy/output_permissions.go create mode 100644 internal/pkg/policy/output_permissions_test.go create mode 100644 internal/pkg/smap/smap.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index bd6132e40..2be8bd786 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" + "github.com/elastic/fleet-server/v7/internal/pkg/smap" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/julienschmidt/httprouter" @@ -48,7 +49,7 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro // Don't log connection drops if err != context.Canceled { - log.Error().Err(err).Str("id", id).Int("code", code).Msg("Fail checkin") + log.Error().Err(err).Str("id", id).Int("code", code).Msg("fail checkin") } http.Error(w, err.Error(), code) } @@ -195,7 +196,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } - 
log.Trace().RawJSON("resp", data).Msg("Checkin response") + log.Trace().RawJSON("resp", data).Msg("checkin response") return nil } @@ -211,7 +212,7 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent sn, err = ct.tr.Resolve(ctx, ackToken) if err != nil { if errors.Is(err, dl.ErrNotFound) { - log.Debug().Str("token", ackToken).Str("agent_id", agent.Id).Msg("Revision token not found") + log.Debug().Str("token", ackToken).Str("agent_id", agent.Id).Msg("revision token not found") err = nil } else { return @@ -264,7 +265,8 @@ func parsePolicy(ctx context.Context, bulker bulk.Bulk, agentId string, p model. // 4) Inject default api key into structure // 5) Re-serialize and return AgentResp structure - var actionObj map[string]interface{} + // using json.RawMessage to avoid the full json de-serialization + var actionObj map[string]json.RawMessage if err := json.Unmarshal(p.Data, &actionObj); err != nil { return nil, err } @@ -276,26 +278,69 @@ func parsePolicy(ctx context.Context, bulker bulk.Bulk, agentId string, p model. return nil, err } + // Check if need to generate a new output api key + var ( + hash string + needKey bool + roles []byte + ) + if agent.DefaultApiKey == "" { - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, "default") + hash, roles, err = policy.GetRoleDescriptors(actionObj[policy.OutputPermissionsProperty]) + if err != nil { + return nil, err + } + needKey = true + log.Debug().Str("agentId", agentId).Msg("agent API key is not present") + } else { + hash, roles, needKey, err = policy.CheckOutputPermissionsChanged(agent.PolicyOutputPermissionsHash, actionObj[policy.OutputPermissionsProperty]) + if err != nil { + return nil, err + } + if needKey { + log.Debug().Str("agentId", agentId).Msg("policy output permissions changed") + } else { + log.Debug().Str("agentId", agentId).Msg("policy output permissions are the same") + } + } + + if needKey { + log.Debug().Str("agentId", agentId).RawJSON("roles", roles).Str("hash", hash).Msg("generating a new API key") + defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, policy.DefaultOutputName, roles) if err != nil { return nil, err } agent.DefaultApiKey = defaultOutputApiKey.Agent() agent.DefaultApiKeyId = defaultOutputApiKey.Id + agent.PolicyOutputPermissionsHash = hash - log.Info().Str("agentId", agentId).Msg("Rewriting full agent record to pick up default output key.") + log.Info().Str("agentId", agentId).Msg("rewriting full agent record to pick up default output key.") if err = dl.IndexAgent(ctx, bulker, agent); err != nil { return nil, err } } - if ok := setMapObj(actionObj, agent.DefaultApiKey, "outputs", "default", "api_key"); !ok { - log.Debug().Msg("Cannot inject api_key into policy") + // Parse the outputs maps in order to inject the api key + const outputsProperty = "outputs" + outputs, err := smap.Parse(actionObj[outputsProperty]) + if err != nil { + return nil, err + } + + if outputs != nil { + if ok := setMapObj(outputs, agent.DefaultApiKey, "default", "api_key"); !ok { + log.Debug().Msg("cannot inject api_key into policy") + } else { + outputRaw, err := json.Marshal(outputs) + if err != nil { + return nil, err + } + actionObj[outputsProperty] = json.RawMessage(outputRaw) + } } dataJSON, err := json.Marshal(struct { - Policy map[string]interface{} `json:"policy"` + Policy map[string]json.RawMessage `json:"policy"` }{actionObj}) if err != nil { return nil, err @@ -349,7 +394,7 @@ func findAgentByApiKeyId(ctx context.Context, bulker 
bulk.Bulk, id string) (*mod func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err error) { // Quick comparison first if bytes.Equal(req.LocalMeta, agent.LocalMetadata) { - log.Trace().Msg("Quick comparing local metadata is equal") + log.Trace().Msg("quick comparing local metadata is equal") return nil, nil } @@ -366,7 +411,7 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err erro } if reqLocalMeta != nil && !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { - log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("Applying new local metadata") + log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("applying new local metadata") fields = map[string]interface{}{ FieldLocalMetadata: req.LocalMeta, } diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index c733e409a..280ce91b3 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -180,18 +180,6 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq return nil, err } - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agentId, "default") - if err != nil { - return nil, err - } - - log.Debug(). - Dur("rtt", time.Since(now)). - Str("agentId", agentId). - Str("accessApiKey.Id", accessApiKey.Id). - Str("defaultOutputApiKey.Id", defaultOutputApiKey.Id). - Msg("Created api key") - // Update the local metadata agent id localMeta, err := updateLocalMetaAgentId(req.Meta.Local, agentId) if err != nil { @@ -199,15 +187,13 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } agentData := model.Agent{ - Active: true, - PolicyId: erec.PolicyId, - Type: req.Type, - EnrolledAt: now.UTC().Format(time.RFC3339), - LocalMetadata: localMeta, - AccessApiKeyId: accessApiKey.Id, - DefaultApiKeyId: defaultOutputApiKey.Id, - DefaultApiKey: defaultOutputApiKey.Agent(), - ActionSeqNo: []int64{sqn.UndefinedSeqNo}, + Active: true, + PolicyId: erec.PolicyId, + Type: req.Type, + EnrolledAt: now.UTC().Format(time.RFC3339), + LocalMetadata: localMeta, + AccessApiKeyId: accessApiKey.Id, + ActionSeqNo: []int64{sqn.UndefinedSeqNo}, } err = createFleetAgent(ctx, bulker, agentId, agentData) @@ -310,9 +296,9 @@ func generateAccessApiKey(ctx context.Context, client *elasticsearch.Client, age return apikey.Create(ctx, client, agentId, "", []byte(kFleetAccessRolesJSON)) } -func generateOutputApiKey(ctx context.Context, client *elasticsearch.Client, agentId string, outputName string) (*apikey.ApiKey, error) { +func generateOutputApiKey(ctx context.Context, client *elasticsearch.Client, agentId, outputName string, roles []byte) (*apikey.ApiKey, error) { name := fmt.Sprintf("%s:%s", agentId, outputName) - return apikey.Create(ctx, client, name, "", []byte(kFleetOutputRolesJSON)) + return apikey.Create(ctx, client, name, "", roles) } func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (*model.EnrollmentApiKey, error) { diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 4262f5d51..e3f6fe5f3 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -36,26 +36,6 @@ const kFleetAccessRolesJSON = ` } ` -const kFleetOutputRolesJSON = ` - { - "fleet-output": { - "cluster": ["monitor"], - "index": [{ - "names": [ - "logs-*", - "metrics-*", - "traces-*", - ".logs-endpoint.diagnostic.collection-*" - ], - "privileges": [ - "auto_configure", - "create_doc" - ] - }] - } - } -` - // Wrong: no AAD; // This defeats the signature check; // can copy from one to another and will dispatch. 
diff --git a/fleet-server.yml b/fleet-server.yml index 830d3cf97..1642b53c7 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -8,7 +8,7 @@ fleet: agent: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 logging: - level: '${LOG_LEVEL:INFO}' + level: '${LOG_LEVEL:DEBUG}' # Input config provided by the Elastic Agent for the server #inputs: diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index 78817c87b..1ebf32089 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -8,16 +8,17 @@ import ( "context" "encoding/json" "errors" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "sync" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" ) var ( - tmplQueryLatestPolicies []byte - initQueryLatestPoliciesOnce sync.Once + tmplQueryLatestPolicies = prepareQueryLatestPolicies() + + queryPolicyByID = preparePolicyFindByID() ) var ErrPolicyLeaderNotFound = errors.New("policy has no leader") @@ -36,12 +37,22 @@ func prepareQueryLatestPolicies() []byte { return root.MustMarshalJSON() } +func preparePolicyFindByID() *dsl.Tmpl { + tmpl := dsl.NewTmpl() + root := dsl.NewRoot() + + root.Size(1) + root.Query().Bool().Filter().Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) + sort := root.Sort() + sort.SortOrder(FieldRevisionIdx, dsl.SortDescend) + sort.SortOrder(FieldCoordinatorIdx, dsl.SortDescend) + + tmpl.MustResolve(root) + return tmpl +} + // QueryLatestPolices gets the latest revision for a policy func QueryLatestPolicies(ctx context.Context, bulker bulk.Bulk, opt ...Option) ([]model.Policy, error) { - initQueryLatestPoliciesOnce.Do(func() { - tmplQueryLatestPolicies = prepareQueryLatestPolicies() - }) - o := newOption(FleetPolicies, opt...) res, err := bulker.Search(ctx, []string{o.indexName}, tmplQueryLatestPolicies) if err != nil { diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index 0aa36732e..0a45abfba 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -128,6 +128,9 @@ const ( "policy_id": { "type": "keyword" }, + "policy_output_permissions_hash": { + "type": "keyword" + }, "policy_revision_idx": { "type": "integer" }, diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 485892a31..389c2fa30 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -132,6 +132,9 @@ type Agent struct { // The policy ID for the Elastic Agent PolicyId string `json:"policy_id,omitempty"` + // The policy output permissions hash + PolicyOutputPermissionsHash string `json:"policy_output_permissions_hash,omitempty"` + // The current policy revision_idx for the Elastic Agent PolicyRevisionIdx int64 `json:"policy_revision_idx,omitempty"` diff --git a/internal/pkg/policy/output_permissions.go b/internal/pkg/policy/output_permissions.go new file mode 100644 index 000000000..465de52f9 --- /dev/null +++ b/internal/pkg/policy/output_permissions.go @@ -0,0 +1,99 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package policy + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" + "errors" + + "github.com/elastic/fleet-server/v7/internal/pkg/smap" +) + +const ( + DefaultOutputName = "default" + OutputPermissionsProperty = "output_permissions" +) + +var ( + ErrOutputPermissionsNotFound = errors.New("output_permissions not found") + ErrDefaultOutputNotFound = errors.New("default output not found") + ErrInvalidPermissionsFormat = errors.New("invalid permissions format") +) + +func GetRoleDescriptors(outputPermissionsRaw []byte) (hash string, roles []byte, err error) { + if len(outputPermissionsRaw) == 0 { + return + } + + output, err := getDefaultOutputMap(outputPermissionsRaw) + if err != nil { + return + } + + // Calculating the hash of the original output map + hash, err = output.Hash() + if err != nil { + return + } + + roles, err = json.Marshal(output) + if err != nil { + return + } + + return +} + +func CheckOutputPermissionsChanged(hash string, outputPermissionsRaw []byte) (newHash string, roles []byte, changed bool, err error) { + if len(outputPermissionsRaw) == 0 { + return + } + + // shotcuircut, hash and compare as is, if equals the json is serialized consistently from jsacascript and go + newHash, err = getDefaultOutputHash(outputPermissionsRaw) + if err != nil { + return + } + if hash == newHash { + return hash, nil, false, nil + } + + newHash, roles, err = GetRoleDescriptors(outputPermissionsRaw) + if err != nil { + return + } + + return newHash, roles, (newHash != hash), nil +} + +func getDefaultOutputHash(outputPermissionsRaw []byte) (hash string, err error) { + var m map[string]json.RawMessage + err = json.Unmarshal(outputPermissionsRaw, &m) + if err != nil { + return + } + + if len(m[DefaultOutputName]) == 0 { + return + } + + b := sha256.Sum256(m[DefaultOutputName]) + return hex.EncodeToString(b[:]), nil +} + +func getDefaultOutputMap(outputPermissionsRaw []byte) (defaultOutput smap.Map, err error) { + outputPermissions, err := smap.Parse(outputPermissionsRaw) + if err != nil { + return + } + + defaultOutput = outputPermissions.GetMap(DefaultOutputName) + if defaultOutput == nil { + err = ErrDefaultOutputNotFound + } + return +} diff --git a/internal/pkg/policy/output_permissions_test.go b/internal/pkg/policy/output_permissions_test.go new file mode 100644 index 000000000..472d9ee74 --- /dev/null +++ b/internal/pkg/policy/output_permissions_test.go @@ -0,0 +1,218 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build !integration + +package policy + +import ( + "testing" + + "github.com/elastic/fleet-server/v7/internal/pkg/smap" + "github.com/google/go-cmp/cmp" +) + +const ( + fallbackPermissions = ` + { + "default": { + "_fallback": { + "indices": [ + { + "names": [ + "logs-*", + "metrics-*", + "traces-*", + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "auto_configure", + "create_doc" + ] + } + ] + } + } + } +` + fallbackPermissionsHash = "48e2e1dfe0e64df0dd841e96e28bb82ff6273432e9ebccca259a3278ff86ee4c" + + outputPermissions = ` + { + "default": { + "nginx-logs-1": { + "indices": [ + { + "names": [ + "logs-nginx.access-*", + "logs-nginx.error-*" + ], + "privileges": [ + "append" + ] + } + ] + }, + "nginx-metrics-1": { + "indices": [ + { + "names": [ + "metrics-nginx.substatus-*" + ], + "privileges": [ + "append" + ] + } + ] + }, + "endpoint-policy1-part1": { + "indices": [ + { + "names": [ + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "read" + ] + } + ] + }, + "endpoint-policy1-part2": { + "indices": [ + { + "names": [ + "metrics-endpoint-*" + ], + "privileges": [ + "append" + ] + } + ] + } + } + } +` + outputPermissionsHash = "42c955b5df44eec374dc66a97ab8c2045a88583af499aba81345c4221e473ead" + + resultDescriptors = ` +{ + "endpoint-policy1-part1": { + "indices": [ + { + "names": [ + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "read" + ] + } + ] + }, + "endpoint-policy1-part2": { + "indices": [ + { + "names": [ + "metrics-endpoint-*" + ], + "privileges": [ + "append" + ] + } + ] + }, + "nginx-logs-1": { + "indices": [ + { + "names": [ + "logs-nginx.access-*", + "logs-nginx.error-*" + ], + "privileges": [ + "append" + ] + } + ] + }, + "nginx-metrics-1": { + "indices": [ + { + "names": [ + "metrics-nginx.substatus-*" + ], + "privileges": [ + "append" + ] + } + ] + } +} +` +) + +func TestGetRoleDescriptors(t *testing.T) { + + hash, roles, err := GetRoleDescriptors([]byte(outputPermissions)) + if err != nil { + t.Fatal(err) + } + + m, err := smap.Parse([]byte(resultDescriptors)) + if err != nil { + t.Fatal(err) + } + expected, err := m.Marshal() + if err != nil { + t.Fatal(err) + } + + diff := cmp.Diff(expected, roles) + if diff != "" { + t.Fatal(diff) + } + + diff = cmp.Diff(outputPermissionsHash, hash) + if diff != "" { + t.Fatal(diff) + } +} + +func TestCheckOutputPermissionsChanged(t *testing.T) { + // Detect change with initially empty hash + hash, roles, changed, err := CheckOutputPermissionsChanged("", []byte(fallbackPermissions)) + if err != nil { + t.Fatal(err) + } + diff := cmp.Diff(fallbackPermissionsHash, hash) + if diff != "" { + t.Error(diff) + } + + if !changed { + t.Error("expected policy hash change detected") + } + + if len(roles) == 0 { + t.Error("expected non empty roles descriptors") + } + + // Detect no change with the same hash and the content + newHash, roles, changed, err := CheckOutputPermissionsChanged(hash, []byte(fallbackPermissions)) + diff = cmp.Diff(hash, newHash) + if diff != "" { + t.Error(diff) + } + if changed { + t.Error("expected policy hash no change detected") + } + + // Detect the change with the new output permissions + newHash, roles, changed, err = CheckOutputPermissionsChanged(hash, []byte(outputPermissions)) + diff = cmp.Diff(outputPermissionsHash, newHash) + if diff != "" { + t.Error(diff) + } + if !changed { + t.Error("expected policy hash change detected") + } +} diff --git a/internal/pkg/smap/smap.go b/internal/pkg/smap/smap.go new file mode 100644 index 
000000000..3c636dffd --- /dev/null +++ b/internal/pkg/smap/smap.go @@ -0,0 +1,74 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package smap + +import ( + "crypto/sha256" + "encoding/hex" + "encoding/json" +) + +type Map map[string]interface{} + +func (m Map) GetMap(k string) Map { + if m == nil { + return m + } + + v := m[k] + if v != nil { + if m, ok := v.(map[string]interface{}); ok { + return m + } + } + return nil +} + +func (m Map) GetString(k string) string { + if m == nil { + return "" + } + if v := m[k]; v != nil { + if s, ok := v.(string); ok { + return s + } + } + return "" +} + +func (m Map) Hash() (string, error) { + if m == nil { + return "", nil + } + + // Hashing through the json encoder + h := sha256.New() + enc := json.NewEncoder(h) + err := enc.Encode(m) + if err != nil { + return "", err + } + + return hex.EncodeToString(h.Sum(nil)), nil +} + +func (m Map) Marshal() ([]byte, error) { + if m == nil { + return nil, nil + } + return json.Marshal(m) +} + +func Parse(data []byte) (Map, error) { + if len(data) == 0 { + return nil, nil + } + + var m Map + + err := json.Unmarshal(data, &m) + + return m, err +} diff --git a/model/schema.json b/model/schema.json index 8386463a5..9dcfe3d38 100644 --- a/model/schema.json +++ b/model/schema.json @@ -394,6 +394,10 @@ "description": "The current policy coordinator for the Elastic Agent", "type": "integer" }, + "policy_output_permissions_hash": { + "description": "The policy output permissions hash", + "type": "string" + }, "last_updated": { "description": "Date/time the Elastic Agent was last updated", "type": "string", From 7a3ec913245f8840b426fced82fbc5ead305f847 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 2 Apr 2021 17:18:12 +0000 Subject: [PATCH 046/240] Add user_id to .fleet-actions schema (#198) (#199) (cherry picked from commit cfb6a226a112a262f47044a66e4b53c89108b09f) Co-authored-by: Aleksandr Maus --- internal/pkg/es/mapping.go | 3 +++ internal/pkg/model/schema.go | 3 +++ model/schema.json | 4 ++++ 3 files changed, 10 insertions(+) diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index 0a45abfba..ee48f060f 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -32,6 +32,9 @@ const ( }, "type": { "type": "keyword" + }, + "user_id": { + "type": "keyword" } } }` diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 389c2fa30..ee8357fea 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -53,6 +53,9 @@ type Action struct { // The action type. INPUT_ACTION is the value for the actions that suppose to be routed to the endpoints/beats. Type string `json:"type,omitempty"` + + // The ID of the user who created the action. + UserId string `json:"user_id,omitempty"` } // ActionData The opaque payload. diff --git a/model/schema.json b/model/schema.json index 9dcfe3d38..43a09c525 100644 --- a/model/schema.json +++ b/model/schema.json @@ -38,6 +38,10 @@ "description": "The input type the actions should be routed to.", "type": "string" }, + "user_id": { + "description": "The ID of the user who created the action.", + "type": "string" + }, "agents": { "description": "The Agent IDs the action is intended for. No support for json.RawMessage with the current generator. 
Could be useful to lazy parse the agent ids", "type": "array", From 81d5bfc7cb65f1810d2745883ba898b80f1158c7 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Mon, 5 Apr 2021 13:53:03 -0400 Subject: [PATCH 047/240] Bport2 (#203) * Move to less aggressive bulk queue preallocation size by default. The larger queues are more efficient at high scale, but are way overkill on small systems. * Add pretty print logger output under stderr * Add optional gzip compression support on check-in response. --- cmd/fleet/handleCheckin.go | 45 ++++++++++++++++++++++++------ internal/pkg/bulk/bulk.go | 6 ++-- internal/pkg/bulk/opt.go | 1 + internal/pkg/config/config_test.go | 4 +++ internal/pkg/config/input.go | 3 ++ internal/pkg/config/logging.go | 1 + internal/pkg/logger/logger.go | 7 ++++- 7 files changed, 56 insertions(+), 11 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 2be8bd786..0dede39af 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -6,9 +6,12 @@ package fleet import ( "bytes" + "compress/flate" + "compress/gzip" "context" "encoding/json" "errors" + "io" "net/http" "reflect" "time" @@ -35,6 +38,8 @@ var ( kLongPollTimeout = 300 * time.Second // 5m ) +const kEncodingGzip = "gzip" + func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { // TODO: Consider rate limit here @@ -187,18 +192,42 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st Actions: actions, } - data, err := json.Marshal(&resp) - if err != nil { - return err - } + return ct.writeResponse(w, r, resp) +} - if _, err = w.Write(data); err != nil { - return err +func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp CheckinResponse) error { + var wr io.Writer + + compressionLevel := ct.cfg.Inputs[0].Server.CompressionLevel + + if compressionLevel != flate.NoCompression && acceptsEncoding(r, kEncodingGzip) { + log.Trace().Int("level", compressionLevel).Msg("Compressing policy response") + + zipper, err := gzip.NewWriterLevel(w, compressionLevel) + if err != nil { + return err + } + + // Must close the compression context to flush + defer zipper.Close() + wr = zipper + + w.Header().Set("Content-Encoding", kEncodingGzip) + } else { + wr = w } - log.Trace().RawJSON("resp", data).Msg("checkin response") + encoder := json.NewEncoder(wr) + return encoder.Encode(&resp) +} - return nil +func acceptsEncoding(r *http.Request, encoding string) bool { + for _, v := range r.Header.Values("Accept-Encoding") { + if v == encoding { + return true + } + } + return false } // Resolve AckToken from request, fallback on the agent record diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go index 7761a7dca..68cf6fb95 100644 --- a/internal/pkg/bulk/bulk.go +++ b/internal/pkg/bulk/bulk.go @@ -82,6 +82,7 @@ const ( defaultFlushThresholdCnt = 32768 defaultFlushThresholdSz = 1024 * 1024 * 10 defaultMaxPending = 32 + defaultQueuePrealloc = 64 ) func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) { @@ -119,6 +120,7 @@ func (b *Bulker) parseBulkOpts(opts ...BulkOpt) bulkOptT { flushThresholdCnt: defaultFlushThresholdCnt, flushThresholdSz: defaultFlushThresholdSz, maxPending: defaultMaxPending, + queuePrealloc: defaultQueuePrealloc, } for _, f := range opts { @@ -182,7 +184,7 @@ func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { queues = append(queues, &queueT{ action: action, - queue: make([]bulkT, 0, 
bopts.flushThresholdCnt), + queue: make([]bulkT, 0, bopts.queuePrealloc), }) } @@ -198,7 +200,7 @@ func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { } q.pending = 0 - q.queue = make([]bulkT, 0, bopts.flushThresholdCnt) + q.queue = make([]bulkT, 0, bopts.queuePrealloc) } } diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index c2d1896e5..b73aae6a1 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -31,6 +31,7 @@ type bulkOptT struct { flushThresholdCnt int flushThresholdSz int maxPending int + queuePrealloc int } type BulkOpt func(*bulkOptT) diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index c953a6a77..cfb2e9159 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -60,6 +60,7 @@ func TestConfig(t *testing.T) { Enabled: false, Bind: "localhost:6060", }, + CompressionLevel: 1, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -122,6 +123,7 @@ func TestConfig(t *testing.T) { Enabled: false, Bind: "localhost:6060", }, + CompressionLevel: 1, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -182,6 +184,7 @@ func TestConfig(t *testing.T) { Enabled: false, Bind: "localhost:6060", }, + CompressionLevel: 1, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -242,6 +245,7 @@ func TestConfig(t *testing.T) { Enabled: false, Bind: "localhost:6060", }, + CompressionLevel: 1, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index e24047fcd..fea7ace2e 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -5,6 +5,7 @@ package config import ( + "compress/flate" "fmt" "strings" "time" @@ -62,6 +63,7 @@ type Server struct { MaxConnections int `config:"max_connections"` MaxEnrollPending int64 `config:"max_enroll_pending"` Profile ServerProfile `config:"profile"` + CompressionLevel int `config:"compression_level"` } // InitDefaults initializes the defaults for the configuration. 
@@ -74,6 +76,7 @@ func (c *Server) InitDefaults() { c.RateLimitInterval = 5 * time.Millisecond c.MaxConnections = 0 // no limit c.MaxEnrollPending = 64 + c.CompressionLevel = flate.BestSpeed c.Profile.InitDefaults() } diff --git a/internal/pkg/config/logging.go b/internal/pkg/config/logging.go index 12ac2b03a..1199d5860 100644 --- a/internal/pkg/config/logging.go +++ b/internal/pkg/config/logging.go @@ -44,6 +44,7 @@ type Logging struct { Level string `config:"level"` ToStderr bool `config:"to_stderr"` ToFiles bool `config:"to_files"` + Pretty bool `config:"pretty"` Files *LoggingFiles `config:"files"` } diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index 3e30280c5..292875935 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -6,6 +6,7 @@ package logger import ( "context" + "io" "io/ioutil" "os" "path/filepath" @@ -117,7 +118,11 @@ func level(cfg *config.Config) zerolog.Level { func configure(cfg *config.Config) (zerolog.Logger, WriterSync, error) { if cfg.Logging.ToStderr { - return log.Output(os.Stderr).Level(level(cfg)), os.Stderr, nil + out := io.Writer(os.Stderr) + if cfg.Logging.Pretty { + out = zerolog.ConsoleWriter{Out: os.Stderr} + } + return log.Output(out).Level(level(cfg)), os.Stderr, nil } if cfg.Logging.ToFiles { files := cfg.Logging.Files From 9258e948debfbe431746059a4b3bffef69d2b284 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Apr 2021 18:28:08 +0000 Subject: [PATCH 048/240] Normalize server API limits. (#208) Default host bind should be 0.0.0.0 not loopback. (cherry picked from commit b0f77c6f7e6498ad455a8284293f5a160dbea347) Co-authored-by: Sean Cunningham --- cmd/fleet/handleAck.go | 87 +++++++++++---- cmd/fleet/handleArtifacts.go | 120 +++++++++++--------- cmd/fleet/handleCheckin.go | 100 ++++++++++++----- cmd/fleet/handleEnroll.go | 77 ++++++------- cmd/fleet/handleStatus.go | 2 +- cmd/fleet/main.go | 33 ++++-- cmd/fleet/router.go | 6 +- cmd/fleet/server.go | 28 +---- cmd/fleet/server_integration_test.go | 10 +- cmd/fleet/server_test.go | 6 +- fleet-server.yml | 37 ++++++- internal/pkg/cache/cache.go | 4 +- internal/pkg/config/config.go | 2 + internal/pkg/config/config_test.go | 158 +++++++++++++++++++++------ internal/pkg/config/input.go | 36 +++--- internal/pkg/config/limits.go | 54 +++++++++ internal/pkg/config/runtime.go | 13 +++ internal/pkg/dl/policies.go | 20 +--- internal/pkg/limit/limiter.go | 72 ++++++++++++ internal/pkg/rate/rate.go | 49 --------- 20 files changed, 600 insertions(+), 314 deletions(-) create mode 100644 internal/pkg/config/limits.go create mode 100644 internal/pkg/config/runtime.go create mode 100644 internal/pkg/limit/limiter.go delete mode 100644 internal/pkg/rate/rate.go diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 6a1aaeb7f..5677713f4 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -16,34 +16,73 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) 
var ErrEventAgentIdMismatch = errors.New("event agentId mismatch") +type AckT struct { + limit *limit.Limiter + bulk bulk.Bulk + cache cache.Cache +} + +func NewAckT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *AckT { + log.Info(). + Interface("limits", cfg.Limits.AckLimit). + Msg("Ack install limits") + + return &AckT{ + bulk: bulker, + cache: cache, + limit: limit.NewLimiter(&cfg.Limits.AckLimit), + } +} + func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { id := ps.ByName("id") - err := _handleAcks(w, r, id, rt.ct.bulker, rt.ct.cache) + err := rt.ack.handleAcks(w, r, id) if err != nil { - code := http.StatusBadRequest - // Don't log connection drops - if err != context.Canceled { - log.Error().Err(err).Int("code", code).Msg("Fail ACK") + lvl := zerolog.DebugLevel + + var code int + switch err { + case limit.ErrRateLimit, limit.ErrMaxLimit: + code = http.StatusTooManyRequests + case context.Canceled: + code = http.StatusServiceUnavailable + default: + lvl = zerolog.InfoLevel + code = http.StatusBadRequest } + // Don't log connection drops + log.WithLevel(lvl). + Err(err). + Int("code", code). + Msg("Fail ACK") - http.Error(w, err.Error(), code) + http.Error(w, "", code) } } -func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) error { - agent, err := authAgent(r, id, bulker, c) +func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) error { + limitF, err := ack.limit.Acquire() + if err != nil { + return err + } + defer limitF() + + agent, err := authAgent(r, id, ack.bulk, ack.cache) if err != nil { return err } @@ -60,7 +99,7 @@ func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk. log.Trace().RawJSON("raw", raw).Msg("Ack request") - if err = _handleAckEvents(r.Context(), agent, req.Events, bulker, c); err != nil { + if err = ack.handleAckEvents(r.Context(), agent, req.Events); err != nil { return err } @@ -78,7 +117,7 @@ func _handleAcks(w http.ResponseWriter, r *http.Request, id string, bulker bulk. 
return nil } -func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, bulker bulk.Bulk, c cache.Cache) error { +func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events []Event) error { var policyAcks []string var unenroll bool for _, ev := range events { @@ -93,9 +132,9 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b continue } - action, ok := c.GetAction(ev.ActionId) + action, ok := ack.cache.GetAction(ev.ActionId) if !ok { - actions, err := dl.FindAction(ctx, bulker, ev.ActionId) + actions, err := dl.FindAction(ctx, ack.bulk, ev.ActionId) if err != nil { return err } @@ -103,7 +142,7 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b return errors.New("no matching action") } action = actions[0] - c.SetAction(action) + ack.cache.SetAction(action, time.Minute) } acr := model.ActionResult{ @@ -115,7 +154,7 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b Data: ev.Data, Error: ev.Error, } - if _, err := dl.CreateActionResult(ctx, bulker, acr); err != nil { + if _, err := dl.CreateActionResult(ctx, ack.bulk, acr); err != nil { return err } @@ -123,7 +162,7 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b if action.Type == TypeUnenroll { unenroll = true } else if action.Type == TypeUpgrade { - if err := _handleUpgrade(ctx, bulker, agent); err != nil { + if err := ack.handleUpgrade(ctx, agent); err != nil { return err } } @@ -131,13 +170,13 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b } if len(policyAcks) > 0 { - if err := _handlePolicyChange(ctx, bulker, agent, policyAcks...); err != nil { + if err := ack.handlePolicyChange(ctx, agent, policyAcks...); err != nil { return err } } if unenroll { - if err := _handleUnenroll(ctx, bulker, agent); err != nil { + if err := ack.handleUnenroll(ctx, agent); err != nil { return err } } @@ -145,7 +184,7 @@ func _handleAckEvents(ctx context.Context, agent *model.Agent, events []Event, b return nil } -func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Agent, actionIds ...string) error { +func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, actionIds ...string) error { // If more than one, pick the winner; // 0) Correct policy id // 1) Highest revision/coordinator number @@ -184,7 +223,7 @@ func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Age Index: dl.FleetAgents, }) - err = bulker.MUpdate(ctx, updates, bulk.WithRefresh()) + err = ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) if err != nil { return err } @@ -193,10 +232,10 @@ func _handlePolicyChange(ctx context.Context, bulker bulk.Bulk, agent *model.Age return nil } -func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) error { +func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { apiKeys := _getAPIKeyIDs(agent) if len(apiKeys) > 0 { - if err := apikey.Invalidate(ctx, bulker.Client(), apiKeys...); err != nil { + if err := apikey.Invalidate(ctx, ack.bulk.Client(), apiKeys...); err != nil { return err } } @@ -222,10 +261,10 @@ func _handleUnenroll(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) Index: dl.FleetAgents, }) - return bulker.MUpdate(ctx, updates, bulk.WithRefresh()) + return ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) } -func _handleUpgrade(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) error { +func (ack *AckT) 
handleUpgrade(ctx context.Context, agent *model.Agent) error { updates := make([]bulk.BulkOp, 0, 1) now := time.Now().UTC().Format(time.RFC3339) fields := map[string]interface{}{ @@ -246,7 +285,7 @@ func _handleUpgrade(ctx context.Context, bulker bulk.Bulk, agent *model.Agent) e Index: dl.FleetAgents, }) - return bulker.MUpdate(ctx, updates, bulk.WithRefresh()) + return ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) } func _getAPIKeyIDs(agent *model.Agent) []string { diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 27a6f1917..5508409b7 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -18,7 +18,9 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/throttle" @@ -34,13 +36,33 @@ const ( ) var ( - artThrottle = throttle.NewThrottle(defaultMaxParallel) ErrorThrottle = errors.New("cannot acquire throttle token") ErrorBadSha2 = errors.New("malformed sha256") ErrorRecord = errors.New("artifact record mismatch") ErrorMismatchSha2 = errors.New("mismatched sha256") ) +type ArtifactT struct { + bulker bulk.Bulk + cache cache.Cache + esThrottle *throttle.Throttle + limit *limit.Limiter +} + +func NewArtifactT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *ArtifactT { + log.Info(). + Interface("limits", cfg.Limits.ArtifactLimit). + Int("maxParallel", defaultMaxParallel). + Msg("Artifact install limits") + + return &ArtifactT{ + bulker: bulker, + cache: cache, + limit: limit.NewLimiter(&cfg.Limits.ArtifactLimit), + esThrottle: throttle.NewThrottle(defaultMaxParallel), + } +} + func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { start := time.Now() @@ -55,33 +77,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http Str("remoteAddr", r.RemoteAddr). Logger() - // Authenticate the APIKey; retrieve agent record. - // Note: This is going to be a bit slow even if we hit the cache on the api key. - // In order to validate that the agent still has that api key, we fetch the agent record from elastic. - agent, err := authAgent(r, "", rt.ct.bulker, rt.ct.cache) - if err != nil { - code := http.StatusUnauthorized - zlog.Info(). - Err(err). - Int("code", code). - Msg("Fail auth") - - http.Error(w, "", code) - return - } - - zlog = zlog.With(). - Str("APIKeyId", agent.AccessApiKeyId). - Str("agentId", agent.Id). - Logger() - - ah := artHandler{ - zlog: zlog, - bulker: rt.ct.bulker, - c: rt.ct.cache, - } - - rdr, err := ah.handle(r.Context(), agent, id, sha2) + rdr, err := rt.at.handleArtifacts(r, zlog, id, sha2) var nWritten int64 if err == nil { @@ -94,7 +90,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http } if err != nil { - code, lvl := assessError(err) + code, lvl := rt.at.assessError(err) zlog.WithLevel(lvl). Err(err). 
@@ -107,7 +103,30 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http } } -func assessError(err error) (int, zerolog.Level) { +func (at ArtifactT) handleArtifacts(r *http.Request, zlog zerolog.Logger, id, sha2 string) (io.Reader, error) { + limitF, err := at.limit.Acquire() + if err != nil { + return nil, err + } + defer limitF() + + // Authenticate the APIKey; retrieve agent record. + // Note: This is going to be a bit slow even if we hit the cache on the api key. + // In order to validate that the agent still has that api key, we fetch the agent record from elastic. + agent, err := authAgent(r, "", at.bulker, at.cache) + if err != nil { + return nil, err + } + + zlog = zlog.With(). + Str("APIKeyId", agent.AccessApiKeyId). + Str("agentId", agent.Id). + Logger() + + return at.handle(r.Context(), zlog, agent, id, sha2) +} + +func (at ArtifactT) assessError(err error) (int, zerolog.Level) { lvl := zerolog.DebugLevel // TODO: return a 503 on elastic timeout, connection drop @@ -120,11 +139,12 @@ func assessError(err error) (int, zerolog.Level) { // show up in the logs at a higher level than debug lvl = zerolog.WarnLevel code = http.StatusNotFound - case ErrorThrottle: + case ErrorThrottle, limit.ErrRateLimit, limit.ErrMaxLimit: code = http.StatusTooManyRequests case context.Canceled: code = http.StatusServiceUnavailable default: + lvl = zerolog.InfoLevel code = http.StatusBadRequest } @@ -137,7 +157,7 @@ type artHandler struct { c cache.Cache } -func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 string) (io.Reader, error) { +func (at ArtifactT) handle(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, id, sha2 string) (io.Reader, error) { // Input validation if err := validateSha2String(sha2); err != nil { @@ -145,13 +165,13 @@ func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 st } // Determine whether the agent should have access to this artifact - if err := ah.authorizeArtifact(ctx, agent, id, sha2); err != nil { - ah.zlog.Warn().Err(err).Msg("Unauthorized GET on artifact") + if err := at.authorizeArtifact(ctx, agent, id, sha2); err != nil { + zlog.Warn().Err(err).Msg("Unauthorized GET on artifact") return nil, err } // Grab artifact, whether from cache or elastic. - artifact, err := ah.getArtifact(ctx, id, sha2) + artifact, err := at.getArtifact(ctx, zlog, id, sha2) if err != nil { return nil, err } @@ -159,7 +179,7 @@ func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 st // Sanity check; just in case something underneath is misbehaving if artifact.Identifier != id || artifact.DecodedSha256 != sha2 { err = ErrorRecord - ah.zlog.Info(). + zlog.Info(). Err(err). Str("artifact_id", artifact.Identifier). Str("artifact_sha2", artifact.DecodedSha256). @@ -167,7 +187,7 @@ func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 st return nil, err } - ah.zlog.Debug(). + zlog.Debug(). Int("sz", len(artifact.Body)). Int64("decodedSz", artifact.DecodedSize). Str("compression", artifact.CompressionAlgorithm). @@ -189,31 +209,31 @@ func (ah artHandler) handle(ctx context.Context, agent *model.Agent, id, sha2 st // // Initial implementation is dependent on security by obscurity; ie. // it should be difficult for an attacker to guess a guid. 
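Every handler touched by this patch follows the same guard pattern: acquire from the endpoint's limiter before doing any work, defer the release, and translate the limiter errors into HTTP 429 while a cancelled request context becomes 503. A self-contained sketch of that contract is shown below; the limiter here is a stand-in built on golang.org/x/time/rate and golang.org/x/sync/semaphore with the same acquire/release shape, not the internal/pkg/limit package itself, and doWork is a placeholder for the real handler body.

package main

import (
	"context"
	"errors"
	"fmt"
	"net/http"
	"time"

	"golang.org/x/sync/semaphore"
	"golang.org/x/time/rate"
)

var (
	errRateLimit = errors.New("rate limit")
	errMaxLimit  = errors.New("max limit")
)

// limiter combines a token bucket (request rate) with a weighted semaphore
// (maximum in-flight requests), mirroring the shape the handlers rely on.
type limiter struct {
	rl  *rate.Limiter
	max *semaphore.Weighted
}

func (l *limiter) acquire() (func(), error) {
	if !l.rl.Allow() {
		return nil, errRateLimit
	}
	if !l.max.TryAcquire(1) {
		return nil, errMaxLimit
	}
	return func() { l.max.Release(1) }, nil
}

// statusFor maps limiter and context errors onto the HTTP codes used above.
func statusFor(err error) int {
	switch {
	case errors.Is(err, errRateLimit), errors.Is(err, errMaxLimit):
		return http.StatusTooManyRequests
	case errors.Is(err, context.Canceled):
		return http.StatusServiceUnavailable
	default:
		return http.StatusBadRequest
	}
}

func doWork(ctx context.Context) error { return ctx.Err() } // placeholder handler body

func handle(w http.ResponseWriter, r *http.Request, l *limiter) {
	release, err := l.acquire()
	if err != nil {
		http.Error(w, "", statusFor(err))
		return
	}
	defer release()
	if err := doWork(r.Context()); err != nil {
		http.Error(w, "", statusFor(err))
		return
	}
	fmt.Fprintln(w, "ok")
}

func main() {
	l := &limiter{
		rl:  rate.NewLimiter(rate.Every(5*time.Millisecond), 25),
		max: semaphore.NewWeighted(50),
	}
	http.HandleFunc("/artifact", func(w http.ResponseWriter, r *http.Request) { handle(w, r, l) })
	_ = http.ListenAndServe("localhost:8080", nil)
}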
-func (ah artHandler) authorizeArtifact(ctx context.Context, agent *model.Agent, ident, sha2 string) error { +func (at ArtifactT) authorizeArtifact(ctx context.Context, agent *model.Agent, ident, sha2 string) error { return nil // TODO } // Return artifact from cache by sha2 or fetch directly from Elastic. // Update cache on successful retrieval from Elastic. -func (ah artHandler) getArtifact(ctx context.Context, ident, sha2 string) (*model.Artifact, error) { +func (at ArtifactT) getArtifact(ctx context.Context, zlog zerolog.Logger, ident, sha2 string) (*model.Artifact, error) { // Check the cache; return immediately if found. - if artifact, ok := ah.c.GetArtifact(ident, sha2); ok { + if artifact, ok := at.cache.GetArtifact(ident, sha2); ok { return &artifact, nil } // Fetch the artifact from elastic - art, err := ah.fetchArtifact(ctx, ident, sha2) + art, err := at.fetchArtifact(ctx, zlog, ident, sha2) if err != nil { - ah.zlog.Info().Err(err).Msg("Fail retrieve artifact") + zlog.Info().Err(err).Msg("Fail retrieve artifact") return nil, err } // The 'Body' field type is Raw; extract to string. var srcPayload string if err = json.Unmarshal(art.Body, &srcPayload); err != nil { - ah.zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") + zlog.Error().Err(err).Msg("Cannot unmarshal artifact payload") return nil, err } @@ -222,13 +242,13 @@ func (ah artHandler) getArtifact(ctx context.Context, ident, sha2 string) (*mode // to avoid having to decode on each cache hit. dstPayload, err := base64.StdEncoding.DecodeString(srcPayload) if err != nil { - ah.zlog.Error().Err(err).Msg("Fail base64 decode artifact") + zlog.Error().Err(err).Msg("Fail base64 decode artifact") return nil, err } // Validate the sha256 hash; this is just good hygiene. if err = validateSha2Data(dstPayload, art.EncodedSha256); err != nil { - ah.zlog.Error().Err(err).Msg("Fail sha2 hash validation") + zlog.Error().Err(err).Msg("Fail sha2 hash validation") return nil, err } @@ -236,7 +256,7 @@ func (ah artHandler) getArtifact(ctx context.Context, ident, sha2 string) (*mode art.Body = dstPayload // Update the cache. - ah.c.SetArtifact(*art, defaultCacheTTL) + at.cache.SetArtifact(*art, defaultCacheTTL) return art, nil } @@ -245,18 +265,18 @@ func (ah artHandler) getArtifact(ctx context.Context, ident, sha2 string) (*mode // TODO: Design a mechanism to mitigate a DDOS attack on bogus hashes. // Perhaps have a cache of the most recently used hashes available, and items that aren't // in the cache can do a lookup but throttle as below. We could update the cache every 10m or so. -func (ah artHandler) fetchArtifact(ctx context.Context, ident, sha2 string) (*model.Artifact, error) { +func (at ArtifactT) fetchArtifact(ctx context.Context, zlog zerolog.Logger, ident, sha2 string) (*model.Artifact, error) { // Throttle prevents more than N outstanding requests to elastic globally and per sha2. - if token := artThrottle.Acquire(sha2, defaultThrottleTTL); token == nil { + if token := at.esThrottle.Acquire(sha2, defaultThrottleTTL); token == nil { return nil, ErrorThrottle } else { defer token.Release() } start := time.Now() - artifact, err := dl.FindArtifact(ctx, ah.bulker, ident, sha2) + artifact, err := dl.FindArtifact(ctx, at.bulker, ident, sha2) - ah.zlog.Info(). + zlog.Info(). Err(err). Dur("rtt", time.Since(start)). 
Msg("fetch artifact") diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 0dede39af..6a571babc 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -11,7 +11,6 @@ import ( "context" "encoding/json" "errors" - "io" "net/http" "reflect" "time" @@ -21,6 +20,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -28,40 +28,55 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) var ( ErrAgentNotFound = errors.New("agent not found") - - kCheckinTimeout = 30 * time.Second - kLongPollTimeout = 300 * time.Second // 5m ) const kEncodingGzip = "gzip" func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - // TODO: Consider rate limit here id := ps.ByName("id") err := rt.ct._handleCheckin(w, r, id, rt.bulker) if err != nil { - code := http.StatusBadRequest - if err == ErrAgentNotFound { + lvl := zerolog.DebugLevel + + var code int + switch err { + case ErrAgentNotFound: code = http.StatusNotFound + lvl = zerolog.WarnLevel + case limit.ErrRateLimit: + code = http.StatusTooManyRequests + case limit.ErrMaxLimit: + // Log this as warn for visibility that limit has been reached. + // This allows customers to tune the configuration on detection of threshold. + code = http.StatusTooManyRequests + lvl = zerolog.WarnLevel + case context.Canceled: + code = http.StatusServiceUnavailable + default: + lvl = zerolog.InfoLevel + code = http.StatusBadRequest } - // Don't log connection drops - if err != context.Canceled { - log.Error().Err(err).Str("id", id).Int("code", code).Msg("fail checkin") - } - http.Error(w, err.Error(), code) + log.WithLevel(lvl). + Err(err). + Str("id", id). + Int("code", code). + Msg("fail checkin") + + http.Error(w, "", code) } } type CheckinT struct { - cfg *config.Config + cfg *config.Server cache cache.Cache bc *BulkCheckin pm policy.Monitor @@ -69,10 +84,11 @@ type CheckinT struct { ad *action.Dispatcher tr *action.TokenResolver bulker bulk.Bulk + limit *limit.Limiter } func NewCheckinT( - cfg *config.Config, + cfg *config.Server, c cache.Cache, bc *BulkCheckin, pm policy.Monitor, @@ -81,7 +97,14 @@ func NewCheckinT( tr *action.TokenResolver, bulker bulk.Bulk, ) *CheckinT { - return &CheckinT{ + + log.Info(). + Interface("limits", cfg.Limits.CheckinLimit). + Dur("long_poll_timeout", cfg.Timeouts.CheckinLongPoll). + Dur("long_poll_timestamp", cfg.Timeouts.CheckinTimestamp). 
+ Msg("Checkin install limits") + + ct := &CheckinT{ cfg: cfg, cache: c, bc: bc, @@ -89,12 +112,21 @@ func NewCheckinT( gcp: gcp, ad: ad, tr: tr, + limit: limit.NewLimiter(&cfg.Limits.CheckinLimit), bulker: bulker, } + + return ct } func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { + limitF, err := ct.limit.Acquire() + if err != nil { + return err + } + defer limitF() + agent, err := authAgent(r, id, ct.bulker, ct.cache) if err != nil { @@ -135,11 +167,11 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st defer ct.pm.Unsubscribe(sub) // Update check-in timestamp on timeout - tick := time.NewTicker(kCheckinTimeout) + tick := time.NewTicker(ct.cfg.Timeouts.CheckinTimestamp) defer tick.Stop() // Chill out for for a bit. Long poll. - longPoll := time.NewTicker(kLongPollTimeout) + longPoll := time.NewTicker(ct.cfg.Timeouts.CheckinLongPoll) defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed @@ -196,29 +228,40 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st } func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp CheckinResponse) error { - var wr io.Writer - compressionLevel := ct.cfg.Inputs[0].Server.CompressionLevel + payload, err := json.Marshal(&resp) + if err != nil { + return err + } - if compressionLevel != flate.NoCompression && acceptsEncoding(r, kEncodingGzip) { - log.Trace().Int("level", compressionLevel).Msg("Compressing policy response") + compressionLevel := ct.cfg.CompressionLevel + compressThreshold := ct.cfg.CompressionThresh + + if len(payload) > compressThreshold && compressionLevel != flate.NoCompression && acceptsEncoding(r, kEncodingGzip) { zipper, err := gzip.NewWriterLevel(w, compressionLevel) if err != nil { return err } - // Must close the compression context to flush - defer zipper.Close() - wr = zipper - w.Header().Set("Content-Encoding", kEncodingGzip) + + if _, err = zipper.Write(payload); err != nil { + return err + } + + err = zipper.Close() + + log.Trace(). + Err(err). + Int("dataSz", len(payload)). + Int("lvl", compressionLevel). 
+ Msg("Compressing checkin response") } else { - wr = w + _, err = w.Write(payload) } - encoder := json.NewEncoder(wr) - return encoder.Encode(&resp) + return err } func acceptsEncoding(r *http.Request, encoding string) bool { @@ -440,6 +483,7 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err erro } if reqLocalMeta != nil && !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { + log.Trace().RawJSON("oldLocalMeta", agent.LocalMetadata).RawJSON("newLocalMeta", req.LocalMeta).Msg("Local metadata not equal") log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("applying new local metadata") fields = map[string]interface{}{ FieldLocalMetadata: req.LocalMeta, diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 280ce91b3..f52ea8fd2 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -18,14 +18,15 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/elastic/go-elasticsearch/v8" "github.com/gofrs/uuid" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" - "golang.org/x/sync/semaphore" ) const ( @@ -37,25 +38,24 @@ const ( var ( ErrUnknownEnrollType = errors.New("unknown enroll request type") - ErrServiceBusy = errors.New("service busy") - ErrAgentIdFailure = errors.New("agent persist failure") ) type EnrollerT struct { - throttle *semaphore.Weighted - bulker bulk.Bulk - cache cache.Cache + bulker bulk.Bulk + cache cache.Cache + limit *limit.Limiter } func NewEnrollerT(cfg *config.Server, bulker bulk.Bulk, c cache.Cache) (*EnrollerT, error) { - // This value has more to do with the throughput of elastic search than anything else - // if you have a large elastic search cluster, you can be more aggressive. - maxEnrollPending := cfg.MaxEnrollPending + + log.Info(). + Interface("limits", cfg.Limits.EnrollLimit). + Msg("Enroller install limits") return &EnrollerT{ - throttle: semaphore.NewWeighted(maxEnrollPending), - bulker: bulker, - cache: c, + limit: limit.NewLimiter(&cfg.Limits.EnrollLimit), + bulker: bulker, + cache: c, }, nil } @@ -72,21 +72,27 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou data, err := rt.et.handleEnroll(r) if err != nil { - code := http.StatusBadRequest - if err == ErrServiceBusy { + lvl := zerolog.DebugLevel + + var code int + switch err { + case limit.ErrRateLimit, limit.ErrMaxLimit: + code = http.StatusTooManyRequests + case context.Canceled: code = http.StatusServiceUnavailable + default: + lvl = zerolog.InfoLevel + code = http.StatusBadRequest } - // Don't log connection drops - if err != context.Canceled { - log.Error(). - Str("mod", kEnrollMod). - Int("code", code). - Err(err).Dur("tdiff", time.Since(start)). - Msg("Enroll fail") - } + log.WithLevel(lvl). + Err(err). + Str("mod", kEnrollMod). + Int("code", code). + Dur("tdiff", time.Since(start)). 
+ Msg("Enroll fail") - http.Error(w, err.Error(), code) + http.Error(w, "", code) return } @@ -102,32 +108,13 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou Msg("handleEnroll OK") } -func (et *EnrollerT) acquireSemaphore(ctx context.Context) error { - start := time.Now() - - // Wait a reasonable amount of time, but if busy for N seconds; ask to come back later. - acquireCtx, cancelF := context.WithTimeout(ctx, time.Second*10) - defer cancelF() - - if err := et.throttle.Acquire(acquireCtx, 1); err != nil { - return ErrServiceBusy - } - - log.Trace(). - Str("mod", kEnrollMod). - Dur("tdiff", time.Since(start)). - Msg("Enroll acquire") - - return nil -} - func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { - if err := et.acquireSemaphore(r.Context()); err != nil { + limitF, err := et.limit.Acquire() + if err != nil { return nil, err } - - defer et.throttle.Release(1) + defer limitF() key, err := authApiKey(r, et.bulker.Client(), et.cache) if err != nil { diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go index 86cf6d303..578cc1a28 100644 --- a/cmd/fleet/handleStatus.go +++ b/cmd/fleet/handleStatus.go @@ -25,7 +25,7 @@ func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprout if err != nil { code := http.StatusInternalServerError log.Error().Err(err).Int("code", code).Msg("fail status") - http.Error(w, err.Error(), code) + http.Error(w, "", code) return } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 0d63aa968..1c03d26c9 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -9,8 +9,8 @@ import ( "fmt" "io" "os" + "runtime/debug" "sync" - "time" "github.com/elastic/go-ucfg" "github.com/elastic/go-ucfg/yaml" @@ -38,8 +38,7 @@ import ( ) const ( - kPolicyThrottle = time.Millisecond * 5 - kAgentMode = "agent-mode" + kAgentMode = "agent-mode" ) func installSignalHandler() context.Context { @@ -422,12 +421,12 @@ func (f *FleetServer) Run(ctx context.Context) error { } // Restart profiler - if curCfg == nil || curCfg.Inputs[0].Server.Profile.Enabled != newCfg.Inputs[0].Server.Profile.Enabled || curCfg.Inputs[0].Server.Profile.Bind != newCfg.Inputs[0].Server.Profile.Bind { + if curCfg == nil || curCfg.Inputs[0].Server.Profiler.Enabled != newCfg.Inputs[0].Server.Profiler.Enabled || curCfg.Inputs[0].Server.Profiler.Bind != newCfg.Inputs[0].Server.Profiler.Bind { stop(proCancel, proEg) proEg, proCancel = nil, nil - if newCfg.Inputs[0].Server.Profile.Enabled { + if newCfg.Inputs[0].Server.Profiler.Enabled { proEg, proCancel = start(ctx, func(ctx context.Context) error { - return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profile.Bind) + return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profiler.Bind) }, ech) } } @@ -471,7 +470,19 @@ func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error } } +func initRuntime(cfg *config.Config) { + if cfg.Runtime.GCPercent != 0 { + old := debug.SetGCPercent(cfg.Runtime.GCPercent) + + log.Info(). + Int("old", old). + Int("new", cfg.Runtime.GCPercent). 
+ Msg("SetGCPercent") + } +} + func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err error) { + initRuntime(cfg) // The metricsServer is only enabled if http.enabled is set in the config metricsServer, err := f.initMetrics(ctx, cfg) @@ -506,7 +517,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er g.Go(loggedRunFunc(ctx, "Coordinator policy monitor", cord.Run)) // Policy monitor - pm := policy.NewMonitor(bulker, pim, kPolicyThrottle) + pm := policy.NewMonitor(bulker, pim, cfg.Inputs[0].Server.Limits.PolicyThrottle) g.Go(loggedRunFunc(ctx, "Policy monitor", pm.Run)) // Policy self monitor @@ -534,12 +545,16 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er bc := NewBulkCheckin(bulker) g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) - ct := NewCheckinT(f.cfg, f.cache, bc, pm, am, ad, tr, bulker) + ct := NewCheckinT(&f.cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) et, err := NewEnrollerT(&f.cfg.Inputs[0].Server, bulker, f.cache) if err != nil { return err } - router := NewRouter(bulker, ct, et, sm) + + at := NewArtifactT(&f.cfg.Inputs[0].Server, bulker, f.cache) + ack := NewAckT(&f.cfg.Inputs[0].Server, bulker, f.cache) + + router := NewRouter(bulker, ct, et, at, ack, sm) g.Go(loggedRunFunc(ctx, "Http server", func(ctx context.Context) error { return runServer(ctx, router, &f.cfg.Inputs[0].Server) diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index 7fb76645b..411022113 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -26,16 +26,20 @@ type Router struct { ver string ct *CheckinT et *EnrollerT + at *ArtifactT + ack *AckT sm policy.SelfMonitor } -func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, sm policy.SelfMonitor) *httprouter.Router { +func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, at *ArtifactT, ack *AckT, sm policy.SelfMonitor) *httprouter.Router { r := Router{ bulker: bulker, ct: ct, et: et, sm: sm, + at: at, + ack: ack, } router := httprouter.New() diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index eecf93233..7d898352b 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -12,7 +12,6 @@ import ( "net/http" "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/rate" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/elastic/beats/v7/libbeat/monitoring" @@ -34,6 +33,10 @@ func init() { } func diagConn(c net.Conn, s http.ConnState) { + if c == nil { + return + } + log.Trace(). Str("local", c.LocalAddr().String()). Str("remote", c.RemoteAddr().String()). @@ -53,7 +56,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve addr := cfg.BindAddress() rdto := cfg.Timeouts.Read wrto := cfg.Timeouts.Write - mhbz := cfg.MaxHeaderByteSize + mhbz := cfg.Limits.MaxHeaderByteSize bctx := func(net.Listener) context.Context { return ctx } log.Info(). 
@@ -107,7 +110,6 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve log.Warn().Msg("exposed over insecure HTTP; enablement of TLS is strongly recommended") } - ln = wrapRateLimitter(ctx, ln, cfg) ln = wrapConnLimitter(ctx, ln, cfg) if err := server.Serve(ln); err != nil && err != context.Canceled { return err @@ -116,26 +118,8 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve return nil } -func wrapRateLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) net.Listener { - rateLimitBurst := cfg.RateLimitBurst - rateLimitInterval := cfg.RateLimitInterval - - if rateLimitInterval != 0 { - log.Info(). - Dur("interval", rateLimitInterval). - Int("burst", rateLimitBurst). - Msg("Server rate limiter installed") - - ln = rate.NewRateListener(ctx, ln, rateLimitBurst, rateLimitInterval) - } else { - log.Info().Msg("server connection rate limiter disabled") - } - - return ln -} - func wrapConnLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) net.Listener { - hardLimit := cfg.MaxConnections + hardLimit := cfg.Limits.MaxConnections if hardLimit != 0 { log.Info(). diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index c7e848182..9ea09f835 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -14,7 +14,6 @@ import ( "io/ioutil" "net/http" "path" - "strings" "testing" "time" @@ -172,7 +171,7 @@ func TestServerUnauthorized(t *testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - diff = cmp.Diff("no authorization header\n", string(raw)) + diff = cmp.Diff("\n", string(raw)) if diff != "" { t.Fatal(diff) } @@ -181,7 +180,7 @@ func TestServerUnauthorized(t *testing.T) { // Unauthorized, expecting error from /_security/_authenticate t.Run("unauthorized", func(t *testing.T) { - const expectedErrResponsePrefix = `fail Auth: [401 Unauthorized]` + for _, u := range agenturls { req, err := http.NewRequest("POST", u, bytes.NewBuffer([]byte("{}"))) require.NoError(t, err) @@ -198,8 +197,9 @@ func TestServerUnauthorized(t *testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - if !strings.HasPrefix(string(raw), expectedErrResponsePrefix) { - t.Fatalf("unexpected error: %s", string(raw)) + diff = cmp.Diff("\n", string(raw)) + if diff != "" { + t.Fatal(diff) } } }) diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 6f20286d0..508d9757c 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -37,13 +37,13 @@ func TestRunServer(t *testing.T) { require.NoError(t, err) bulker := ftesting.MockBulk{} pim := mock.NewMockIndexMonitor() - pm := policy.NewMonitor(bulker, pim, kPolicyThrottle) + pm := policy.NewMonitor(bulker, pim, 5*time.Millisecond) bc := NewBulkCheckin(nil) - ct := NewCheckinT(nil, c, bc, pm, nil, nil, nil, nil) + ct := NewCheckinT(cfg, c, bc, pm, nil, nil, nil, nil) et, err := NewEnrollerT(cfg, nil, c) require.NoError(t, err) - router := NewRouter(bulker, ct, et, nil) + router := NewRouter(bulker, ct, et, nil, nil, nil) errCh := make(chan error) var wg sync.WaitGroup diff --git a/fleet-server.yml b/fleet-server.yml index 1642b53c7..df37e8d86 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -10,6 +10,9 @@ fleet: logging: level: '${LOG_LEVEL:DEBUG}' +runtime: + gc_percent: 100 # Overide the golang GC target percentage (see https://golang.org/pkg/runtime/debug/#SetGCPercent) + # Input config provided by the Elastic Agent for the server #inputs: # - type: fleet-server @@ -19,12 +22,38 @@ fleet: # 
cache: # num_counters: 500000 # 10x times expected count # max_cost: 50 * 1024 * 1024 # 50MiB cache size +# timeouts: +# checkin_long_poll: 300s # long poll timeout +# profiler: +# enabled: true # enable profiler +# limits: +# policy_throttle: 100ms +# max_connetions: 150 +# checkin_limit: +# interval: 100ms +# burst: 25 +# max: 100 +# artifact_limit: +# interval: 10ms +# burst: 5 +# max: 10 +# ack_limit: +# interval: 10ms +# burst: 20 +# max: 10 +# enroll_limit: +# interval: 50ms +# burst: 10 +# max: 8 +# ssl: +# enabled: true +# certificate: /creds/cert.pem +# key: /creds/key.pem logging: - to_stderr: true - #to_files: - #files: - #level: + to_stderr: true # Force the logging output to stderr + pretty: true # Output pretty logging in stderr mode + #level: trace # Enables the stats endpoint under http://localhost:5601 by default. # Additional stats can be found under http://127.0.0.1:5066/stats and http://127.0.0.1:5066/state diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index 4057dacc0..27fd27551 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -48,14 +48,14 @@ func New(cfg Config) (Cache, error) { // // This will only cache the action ID and action Type. So `GetAction` will only // return a `model.Action` with `ActionId` and `Type` set. -func (c Cache) SetAction(action model.Action) { +func (c Cache) SetAction(action model.Action, ttl time.Duration) { scopedKey := "action:" + action.ActionId v := actionCache{ actionId: action.ActionId, actionType: action.Type, } cost := len(action.ActionId) + len(action.Type) - ok := c.cache.Set(scopedKey, v, int64(cost)) + ok := c.cache.SetWithTTL(scopedKey, v, int64(cost), ttl) log.Trace(). Bool("ok", ok). Str("id", action.ActionId). diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index 2f636792b..c0d2d3af3 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -27,6 +27,7 @@ type Config struct { Inputs []Input `config:"inputs"` Logging Logging `config:"logging"` HTTP HTTP `config:"http"` + Runtime Runtime `config:"runtime"` } // InitDefaults initializes the defaults for the configuration. @@ -34,6 +35,7 @@ func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() c.HTTP.InitDefaults() + c.Runtime.InitDefaults() } // Validate ensures that the configuration is valid. 
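The new runtime.gc_percent setting is a thin wrapper over runtime/debug.SetGCPercent: it sets the GOGC target ratio, i.e. how much the live heap may grow before the next collection, and a configured value of 0 is treated as "leave the Go default alone" rather than "disable GC". A small standalone sketch of that behaviour follows; the config struct is an illustrative copy, not the actual fleet-server type.

package main

import (
	"fmt"
	"runtime/debug"
)

// runtimeConfig mirrors the shape of the new runtime config section.
type runtimeConfig struct {
	GCPercent int
}

// applyRuntime changes the GC target only when a non-zero value is configured,
// matching the semantics of initRuntime in the patch above.
func applyRuntime(cfg runtimeConfig) {
	if cfg.GCPercent == 0 {
		return // keep the runtime default (100, or whatever GOGC is set to)
	}
	old := debug.SetGCPercent(cfg.GCPercent)
	fmt.Printf("GC percent changed from %d to %d\n", old, cfg.GCPercent)
}

func main() {
	// e.g. gc_percent: 50 makes the collector run roughly twice as often,
	// trading CPU time for a smaller steady-state heap.
	applyRuntime(runtimeConfig{GCPercent: 50})
}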
diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index cfb2e9159..d91699155 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -49,18 +49,41 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 5 * time.Second, + Write: 60 * 10 * time.Second, + CheckinTimestamp: 30 * time.Second, + CheckinLongPoll: 5 * time.Minute, }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{ + Profiler: ServerProfiler{ Enabled: false, Bind: "localhost:6060", }, - CompressionLevel: 1, + CompressionLevel: 1, + CompressionThresh: 1024, + Limits: ServerLimits{ + MaxHeaderByteSize: 8192, + MaxConnections: 0, + PolicyThrottle: 5 * time.Millisecond, + CheckinLimit: Limit{ + Interval: time.Millisecond, + Burst: 1000, + }, + ArtifactLimit: Limit{ + Interval: time.Millisecond * 5, + Burst: 25, + Max: 50, + }, + EnrollLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + AckLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + }, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -112,18 +135,41 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 5 * time.Second, + Write: 60 * 10 * time.Second, + CheckinTimestamp: 30 * time.Second, + CheckinLongPoll: 5 * time.Minute, }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{ + Profiler: ServerProfiler{ Enabled: false, Bind: "localhost:6060", }, - CompressionLevel: 1, + CompressionLevel: 1, + CompressionThresh: 1024, + Limits: ServerLimits{ + MaxHeaderByteSize: 8192, + MaxConnections: 0, + PolicyThrottle: 5 * time.Millisecond, + CheckinLimit: Limit{ + Interval: time.Millisecond, + Burst: 1000, + }, + ArtifactLimit: Limit{ + Interval: time.Millisecond * 5, + Burst: 25, + Max: 50, + }, + EnrollLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + AckLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + }, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -173,18 +219,41 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 5 * time.Second, + Write: 60 * 10 * time.Second, + CheckinTimestamp: 30 * time.Second, + CheckinLongPoll: 5 * time.Minute, }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{ + Profiler: ServerProfiler{ Enabled: false, Bind: "localhost:6060", }, - CompressionLevel: 1, + CompressionLevel: 1, + CompressionThresh: 1024, + Limits: ServerLimits{ + MaxHeaderByteSize: 8192, + MaxConnections: 0, + PolicyThrottle: 5 * time.Millisecond, + CheckinLimit: Limit{ + Interval: time.Millisecond, + Burst: 1000, + }, + ArtifactLimit: Limit{ + Interval: time.Millisecond * 5, + Burst: 25, + Max: 50, + }, + EnrollLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + AckLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + }, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, @@ -231,21 +300,44 @@ func TestConfig(t 
*testing.T) { { Type: "fleet-server", Server: Server{ - Host: kDefaultHost, + Host: "localhost", Port: 8888, Timeouts: ServerTimeouts{ - Read: 20 * time.Second, - Write: 5 * time.Second, + Read: 20 * time.Second, + Write: 5 * time.Second, + CheckinTimestamp: 30 * time.Second, + CheckinLongPoll: 5 * time.Minute, }, - MaxHeaderByteSize: 8192, - MaxEnrollPending: 64, - RateLimitBurst: 1024, - RateLimitInterval: 5 * time.Millisecond, - Profile: ServerProfile{ + Profiler: ServerProfiler{ Enabled: false, Bind: "localhost:6060", }, - CompressionLevel: 1, + CompressionLevel: 1, + CompressionThresh: 1024, + Limits: ServerLimits{ + MaxHeaderByteSize: 8192, + MaxConnections: 0, + PolicyThrottle: 5 * time.Millisecond, + CheckinLimit: Limit{ + Interval: time.Millisecond, + Burst: 1000, + }, + ArtifactLimit: Limit{ + Interval: time.Millisecond * 5, + Burst: 25, + Max: 50, + }, + EnrollLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + AckLimit: Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + }, + }, }, Cache: Cache{ NumCounters: defaultCacheNumCounters, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index fea7ace2e..ec804a9e5 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -13,7 +13,7 @@ import ( "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) -const kDefaultHost = "localhost" +const kDefaultHost = "0.0.0.0" const kDefaultPort = 8220 // Policy is the configuration policy to use. @@ -23,24 +23,28 @@ type Policy struct { // ServerTimeouts is the configuration for the server timeouts type ServerTimeouts struct { - Read time.Duration `config:"read"` - Write time.Duration `config:"write"` + Read time.Duration `config:"read"` + Write time.Duration `config:"write"` + CheckinTimestamp time.Duration `config:"checkin_timestamp"` + CheckinLongPoll time.Duration `config:"checkin_long_poll"` } // InitDefaults initializes the defaults for the configuration. func (c *ServerTimeouts) InitDefaults() { c.Read = 5 * time.Second - c.Write = 60 * 10 * time.Second // 10 minutes (long poll) + c.Write = 10 * time.Minute + c.CheckinTimestamp = 30 * time.Second + c.CheckinLongPoll = 5 * time.Minute } -// ServerProfile is the configuration for profiling the server. -type ServerProfile struct { +// ServerProfiler is the configuration for profiling the server. +type ServerProfiler struct { Enabled bool `config:"enabled"` Bind string `config:"bind"` } // InitDefaults initializes the defaults for the configuration. -func (c *ServerProfile) InitDefaults() { +func (c *ServerProfiler) InitDefaults() { c.Enabled = false c.Bind = "localhost:6060" } @@ -57,13 +61,10 @@ type Server struct { Port uint16 `config:"port"` TLS *tlscommon.Config `config:"ssl"` Timeouts ServerTimeouts `config:"timeouts"` - MaxHeaderByteSize int `config:"max_header_byte_size"` - RateLimitBurst int `config:"rate_limit_burst"` - RateLimitInterval time.Duration `config:"rate_limit_interval"` - MaxConnections int `config:"max_connections"` - MaxEnrollPending int64 `config:"max_enroll_pending"` - Profile ServerProfile `config:"profile"` + Profiler ServerProfiler `config:"profiler"` CompressionLevel int `config:"compression_level"` + CompressionThresh int `config:"compression_threshold"` + Limits ServerLimits `config:"limits"` } // InitDefaults initializes the defaults for the configuration. 
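The two new timeout knobs control the check-in long-poll loop: checkin_timestamp is how often the agent's last-seen timestamp is refreshed while a request is parked, and checkin_long_poll is how long the request is held open before an empty response is returned. A stripped-down sketch of such a loop is below; the channel, callback, and durations are illustrative stand-ins, not the fleet-server types.

package main

import (
	"context"
	"fmt"
	"time"
)

// longPoll parks a check-in until an action arrives, the long-poll window
// expires, or the request context is cancelled. keepAlive is invoked on every
// checkin_timestamp tick to refresh the agent's last-seen timestamp.
func longPoll(ctx context.Context, actions <-chan string, timestampEvery, pollFor time.Duration, keepAlive func()) (string, error) {
	tick := time.NewTicker(timestampEvery) // e.g. 30s
	defer tick.Stop()

	deadline := time.NewTimer(pollFor) // e.g. 5m
	defer deadline.Stop()

	for {
		select {
		case <-ctx.Done():
			return "", ctx.Err()
		case a := <-actions:
			return a, nil // an action was dispatched; answer immediately
		case <-tick.C:
			keepAlive()
		case <-deadline.C:
			return "", nil // long poll expired; send an empty response
		}
	}
}

func main() {
	actions := make(chan string, 1)
	go func() {
		time.Sleep(50 * time.Millisecond)
		actions <- "POLICY_CHANGE"
	}()
	a, err := longPoll(context.Background(), actions, 20*time.Millisecond, 200*time.Millisecond, func() {
		fmt.Println("refresh last-checkin timestamp")
	})
	fmt.Println("action:", a, "err:", err)
}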
@@ -71,13 +72,10 @@ func (c *Server) InitDefaults() { c.Host = kDefaultHost c.Port = kDefaultPort c.Timeouts.InitDefaults() - c.MaxHeaderByteSize = 8192 // 8k - c.RateLimitBurst = 1024 - c.RateLimitInterval = 5 * time.Millisecond - c.MaxConnections = 0 // no limit - c.MaxEnrollPending = 64 c.CompressionLevel = flate.BestSpeed - c.Profile.InitDefaults() + c.CompressionThresh = 1024 + c.Profiler.InitDefaults() + c.Limits.InitDefaults() } // BindAddress returns the binding address for the HTTP server. diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go new file mode 100644 index 000000000..cef3ad525 --- /dev/null +++ b/internal/pkg/config/limits.go @@ -0,0 +1,54 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import ( + "time" +) + +type Limit struct { + Interval time.Duration `config:"interval"` + Burst int `config:"burst"` + Max int64 `config:"max"` +} + +type ServerLimits struct { + PolicyThrottle time.Duration `config:"policy_throttle"` + MaxHeaderByteSize int `config:"max_header_byte_size"` + MaxConnections int `config:"max_connections"` + + CheckinLimit Limit `config:"checkin_limit"` + ArtifactLimit Limit `config:"artifact_limit"` + EnrollLimit Limit `config:"enroll_limit"` + AckLimit Limit `config:"ack_limit"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *ServerLimits) InitDefaults() { + + c.MaxHeaderByteSize = 8192 // 8k + c.MaxConnections = 0 // no limit + c.PolicyThrottle = time.Millisecond * 5 + + c.CheckinLimit = Limit{ + Interval: time.Millisecond, + Burst: 1000, + } + c.ArtifactLimit = Limit{ + Interval: time.Millisecond * 5, + Burst: 25, + Max: 50, + } + c.EnrollLimit = Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + } + c.AckLimit = Limit{ + Interval: time.Millisecond * 10, + Burst: 100, + Max: 50, + } +} diff --git a/internal/pkg/config/runtime.go b/internal/pkg/config/runtime.go new file mode 100644 index 000000000..c1f2d1c37 --- /dev/null +++ b/internal/pkg/config/runtime.go @@ -0,0 +1,13 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package config + +type Runtime struct { + GCPercent int `config:"gc_percent"` +} + +func (r Runtime) InitDefaults() { + r.GCPercent = 0 +} diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index 1ebf32089..b24e5b1b5 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -17,13 +17,9 @@ import ( var ( tmplQueryLatestPolicies = prepareQueryLatestPolicies() - - queryPolicyByID = preparePolicyFindByID() + ErrMissingAggregations = errors.New("missing expected aggregation result") ) -var ErrPolicyLeaderNotFound = errors.New("policy has no leader") -var ErrMissingAggregations = errors.New("missing expected aggregation result") - func prepareQueryLatestPolicies() []byte { root := dsl.NewRoot() root.Size(0) @@ -37,20 +33,6 @@ func prepareQueryLatestPolicies() []byte { return root.MustMarshalJSON() } -func preparePolicyFindByID() *dsl.Tmpl { - tmpl := dsl.NewTmpl() - root := dsl.NewRoot() - - root.Size(1) - root.Query().Bool().Filter().Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) - sort := root.Sort() - sort.SortOrder(FieldRevisionIdx, dsl.SortDescend) - sort.SortOrder(FieldCoordinatorIdx, dsl.SortDescend) - - tmpl.MustResolve(root) - return tmpl -} - // QueryLatestPolices gets the latest revision for a policy func QueryLatestPolicies(ctx context.Context, bulker bulk.Bulk, opt ...Option) ([]model.Policy, error) { o := newOption(FleetPolicies, opt...) diff --git a/internal/pkg/limit/limiter.go b/internal/pkg/limit/limiter.go new file mode 100644 index 000000000..09a82b417 --- /dev/null +++ b/internal/pkg/limit/limiter.go @@ -0,0 +1,72 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package limit + +import ( + "errors" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/config" + + "golang.org/x/sync/semaphore" + "golang.org/x/time/rate" +) + +type Limiter struct { + rateLimit *rate.Limiter + maxLimit *semaphore.Weighted +} + +type ReleaseFunc func() + +var ( + ErrRateLimit = errors.New("rate limit") + ErrMaxLimit = errors.New("max limit") +) + +func NewLimiter(cfg *config.Limit) *Limiter { + + if cfg == nil { + return &Limiter{} + } + + l := &Limiter{} + + if cfg.Interval != time.Duration(0) { + l.rateLimit = rate.NewLimiter(rate.Every(cfg.Interval), cfg.Burst) + } + + if cfg.Max != 0 { + l.maxLimit = semaphore.NewWeighted(cfg.Max) + } + + return l +} + +func (l *Limiter) Acquire() (ReleaseFunc, error) { + releaseFunc := noop + + if l.rateLimit != nil && !l.rateLimit.Allow() { + return nil, ErrRateLimit + } + + if l.maxLimit != nil { + if !l.maxLimit.TryAcquire(1) { + return nil, ErrMaxLimit + } + releaseFunc = l.release + } + + return releaseFunc, nil +} + +func (l *Limiter) release() { + if l.maxLimit != nil { + l.maxLimit.Release(1) + } +} + +func noop() { +} diff --git a/internal/pkg/rate/rate.go b/internal/pkg/rate/rate.go deleted file mode 100644 index 0b2d45cc4..000000000 --- a/internal/pkg/rate/rate.go +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package rate - -// Listener limited by leaky bucket. -// TODO: Not enamored with this. More complicated than necessary. 
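For a sense of scale, the defaults installed by ServerLimits.InitDefaults translate into per-endpoint ceilings as follows: the interval is the token refill period of the underlying golang.org/x/time/rate bucket, burst is the bucket size, and max caps concurrent in-flight requests via a weighted semaphore, with 0 meaning unlimited. A throwaway sketch that just prints the effective numbers for those defaults (the limit struct is an illustrative copy of config.Limit, not an import of the internal package):

package main

import (
	"fmt"
	"time"
)

// limit mirrors the shape of the new config.Limit type.
type limit struct {
	Interval time.Duration
	Burst    int
	Max      int64
}

func main() {
	defaults := map[string]limit{
		"checkin":  {Interval: time.Millisecond, Burst: 1000},
		"artifact": {Interval: 5 * time.Millisecond, Burst: 25, Max: 50},
		"enroll":   {Interval: 10 * time.Millisecond, Burst: 100, Max: 50},
		"ack":      {Interval: 10 * time.Millisecond, Burst: 100, Max: 50},
	}
	for name, l := range defaults {
		sustained := float64(time.Second) / float64(l.Interval) // one token refilled per interval
		fmt.Printf("%-8s ~%.0f req/s sustained, bursts of %d, max in-flight %d (0 = unlimited)\n",
			name, sustained, l.Burst, l.Max)
	}
}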
- -import ( - "context" - "net" - "time" - - xr "golang.org/x/time/rate" -) - -type rateListener struct { - net.Listener - lim *xr.Limiter - - ctx context.Context - cancelF context.CancelFunc -} - -func NewRateListener(ctx context.Context, l net.Listener, burst int, interval time.Duration) net.Listener { - - ctx, cfunc := context.WithCancel(ctx) - - return &rateListener{ - Listener: l, - lim: xr.NewLimiter(xr.Every(interval), burst), - ctx: ctx, - cancelF: cfunc, - } -} - -func (r *rateListener) Accept() (net.Conn, error) { - if err := r.lim.Wait(r.ctx); err != nil { - return nil, err - } - - return r.Listener.Accept() -} - -func (r *rateListener) Close() error { - r.cancelF() - return r.Listener.Close() -} From 88bc4e84f9a34b772cc33dcdef6eaa40329d1b4f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 8 Apr 2021 00:26:52 +0000 Subject: [PATCH 049/240] Propagate enrollment_token to agent during bootstrap. (#207) (#210) * Propogate enrollment_token to agent during bootstrap. * Fix issues with indexes in elasticsearch. * Remove the unneeded check. (cherry picked from commit b70022150cf4d9c22b2caacff41c7ffdd4256ab2) Co-authored-by: Blake Rouse --- cmd/fleet/handleEnroll.go | 2 +- internal/pkg/coordinator/monitor.go | 6 +- internal/pkg/dl/enrollment_api_key.go | 46 +++-- .../dl/enrollment_api_key_integration_test.go | 59 +++++-- internal/pkg/policy/self.go | 126 ++++++++++---- internal/pkg/policy/self_test.go | 164 ++++++++++++++++-- internal/pkg/status/reporter.go | 6 +- 7 files changed, 322 insertions(+), 87 deletions(-) diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index f52ea8fd2..1ae3085dc 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -295,7 +295,7 @@ func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (* } // Pull API key record from .fleet-enrollment-api-keys - rec, err := dl.FindEnrollmentAPIKey(ctx, et.bulker, dl.QueryEnrollmentAPIKeyByID, id) + rec, err := dl.FindEnrollmentAPIKey(ctx, et.bulker, dl.QueryEnrollmentAPIKeyByID, dl.FieldApiKeyID, id) if err != nil { return nil, err } diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 272d76f2d..67b85e636 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -202,11 +202,9 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { } leaders, err = dl.SearchPolicyLeaders(ctx, m.bulker, ids, dl.WithIndexName(m.leadersIndex)) if err != nil { - if errors.Is(err, es.ErrIndexNotFound) { - m.log.Debug().Str("index", m.leadersIndex).Msg(es.ErrIndexNotFound.Error()) - return nil + if !errors.Is(err, es.ErrIndexNotFound) { + return err } - return err } } diff --git a/internal/pkg/dl/enrollment_api_key.go b/internal/pkg/dl/enrollment_api_key.go index 1ac18e72b..5e268f239 100644 --- a/internal/pkg/dl/enrollment_api_key.go +++ b/internal/pkg/dl/enrollment_api_key.go @@ -18,39 +18,36 @@ const ( ) var ( - QueryEnrollmentAPIKeyByID = prepareFindEnrollmentAPIKeyByID() + QueryEnrollmentAPIKeyByID = prepareFindEnrollmentAPIKeyByID() + QueryEnrollmentAPIKeyByPolicyID = prepareFindEnrollmentAPIKeyByPolicyID() ) -// RenderAllEnrollmentAPIKeysQuery render all enrollment api keys query. For migration only. 
-func RenderAllEnrollmentAPIKeysQuery(size uint64) ([]byte, error) { +func prepareFindEnrollmentAPIKeyByID() *dsl.Tmpl { tmpl := dsl.NewTmpl() root := dsl.NewRoot() - root.Size(size) + root.Query().Bool().Filter().Term(FieldApiKeyID, tmpl.Bind(FieldApiKeyID), nil) - err := tmpl.Resolve(root) - if err != nil { - return nil, err - } - return tmpl.Render(nil) + tmpl.MustResolve(root) + return tmpl } -func prepareFindEnrollmentAPIKeyByID() *dsl.Tmpl { +func prepareFindEnrollmentAPIKeyByPolicyID() *dsl.Tmpl { tmpl := dsl.NewTmpl() root := dsl.NewRoot() - root.Query().Bool().Filter().Term(FieldApiKeyID, tmpl.Bind(FieldApiKeyID), nil) + root.Query().Bool().Filter().Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) tmpl.MustResolve(root) return tmpl } -func FindEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, id string) (rec model.EnrollmentApiKey, err error) { - return findEnrollmentAPIKey(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, id) +func FindEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, field string, id string) (rec model.EnrollmentApiKey, err error) { + return findEnrollmentAPIKey(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, field, id) } -func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, id string) (rec model.EnrollmentApiKey, err error) { - res, err := SearchWithOneParam(ctx, bulker, tmpl, index, FieldApiKeyID, id) +func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, field string, id string) (rec model.EnrollmentApiKey, err error) { + res, err := SearchWithOneParam(ctx, bulker, tmpl, index, field, id) if err != nil { return } @@ -63,3 +60,22 @@ func findEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, t err = res.Hits[0].Unmarshal(&rec) return rec, err } + +func FindEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, field string, id string) ([]model.EnrollmentApiKey, error) { + return findEnrollmentAPIKeys(ctx, bulker, FleetEnrollmentAPIKeys, tmpl, field, id) +} + +func findEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, index string, tmpl *dsl.Tmpl, field string, id string) ([]model.EnrollmentApiKey, error) { + res, err := SearchWithOneParam(ctx, bulker, tmpl, index, field, id) + if err != nil { + return nil, err + } + + recs := make([]model.EnrollmentApiKey, len(res.Hits)) + for i := 0; i < len(res.Hits); i++ { + if err := res.Hits[i].Unmarshal(&recs[i]); err != nil { + return nil, err + } + } + return recs, nil +} diff --git a/internal/pkg/dl/enrollment_api_key_integration_test.go b/internal/pkg/dl/enrollment_api_key_integration_test.go index ba5420b34..d2322ba4b 100644 --- a/internal/pkg/dl/enrollment_api_key_integration_test.go +++ b/internal/pkg/dl/enrollment_api_key_integration_test.go @@ -22,7 +22,7 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -func createRandomEnrollmentAPIKey() model.EnrollmentApiKey { +func createRandomEnrollmentAPIKey(policyID string) model.EnrollmentApiKey { now := time.Now().UTC() return model.EnrollmentApiKey{ ESDocument: model.ESDocument{ @@ -33,13 +33,13 @@ func createRandomEnrollmentAPIKey() model.EnrollmentApiKey { ApiKeyId: xid.New().String(), CreatedAt: now.Format(time.RFC3339), Name: "Default (db3f8318-05f0-4625-a808-9deddb0112b5)", - PolicyId: uuid.Must(uuid.NewV4()).String(), + PolicyId: policyID, } } -func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string) (rec model.EnrollmentApiKey, err error) { 
- rec = createRandomEnrollmentAPIKey() +func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index string, policyID string) (rec model.EnrollmentApiKey, err error) { + rec = createRandomEnrollmentAPIKey(policyID) body, err := json.Marshal(rec) if err != nil { @@ -52,22 +52,16 @@ func storeRandomEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, index st return rec, err } -func setupEnrollmentAPIKeys(ctx context.Context, t *testing.T) (string, bulk.Bulk, model.EnrollmentApiKey) { +func TestSearchEnrollmentAPIKeyByID(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingEnrollmentApiKey) - rec, err := storeRandomEnrollmentAPIKey(ctx, bulker, index) + rec, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, uuid.Must(uuid.NewV4()).String()) if err != nil { t.Fatal(err) } - - return index, bulker, rec -} - -func TestSearchEnrollmentAPIKey(t *testing.T) { - ctx, cn := context.WithCancel(context.Background()) - defer cn() - - index, bulker, rec := setupEnrollmentAPIKeys(ctx, t) - foundRec, err := findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, rec.ApiKeyId) + foundRec, err := findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, FieldApiKeyID, rec.ApiKeyId) if err != nil { t.Fatal(err) } @@ -77,7 +71,7 @@ func TestSearchEnrollmentAPIKey(t *testing.T) { t.Fatal(diff) } - foundRec, err = findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, xid.New().String()) + foundRec, err = findEnrollmentAPIKey(ctx, bulker, index, QueryEnrollmentAPIKeyByID, FieldApiKeyID, xid.New().String()) if err == nil { t.Fatal("expected error") } else { @@ -87,3 +81,34 @@ func TestSearchEnrollmentAPIKey(t *testing.T) { } } } + +func TestSearchEnrollmentAPIKeyByPolicyID(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingEnrollmentApiKey) + + policyID := uuid.Must(uuid.NewV4()).String() + rec1, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, policyID) + if err != nil { + t.Fatal(err) + } + rec2, err := storeRandomEnrollmentAPIKey(ctx, bulker, index, policyID) + if err != nil { + t.Fatal(err) + } + _, err = storeRandomEnrollmentAPIKey(ctx, bulker, index, uuid.Must(uuid.NewV4()).String()) + if err != nil { + t.Fatal(err) + } + + foundRecs, err := findEnrollmentAPIKeys(ctx, bulker, index, QueryEnrollmentAPIKeyByPolicyID, FieldPolicyId, policyID) + if err != nil { + t.Fatal(err) + } + + diff := cmp.Diff([]model.EnrollmentApiKey{rec1, rec2}, foundRecs) + if diff != "" { + t.Fatal(diff) + } +} diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go index ac6b52621..39885eada 100644 --- a/internal/pkg/policy/self.go +++ b/internal/pkg/policy/self.go @@ -9,15 +9,15 @@ import ( "encoding/json" "errors" "fmt" - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "net/http" "sync" + "time" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" @@ -25,6 +25,11 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/status" ) +// DefaultCheckTime is the default interval for self to check 
for its policy. +const DefaultCheckTime = 5 * time.Second + +type enrollmentTokenFetcher func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) + type SelfMonitor interface { // Run runs the monitor. Run(ctx context.Context) error @@ -46,8 +51,10 @@ type selfMonitorT struct { policy *model.Policy - policyF policyFetcher - policiesIndex string + policyF policyFetcher + policiesIndex string + enrollmentTokenF enrollmentTokenFetcher + checkTime time.Duration } // NewSelfMonitor creates the self policy monitor. @@ -56,15 +63,17 @@ type selfMonitorT struct { // has a Fleet Server input defined. func NewSelfMonitor(fleet config.Fleet, bulker bulk.Bulk, monitor monitor.Monitor, policyId string, reporter status.Reporter) SelfMonitor { return &selfMonitorT{ - log: log.With().Str("ctx", "policy self monitor").Logger(), - fleet: fleet, - bulker: bulker, - monitor: monitor, - policyId: policyId, - status: proto.StateObserved_STARTING, - reporter: reporter, - policyF: dl.QueryLatestPolicies, - policiesIndex: dl.FleetPolicies, + log: log.With().Str("ctx", "policy self monitor").Logger(), + fleet: fleet, + bulker: bulker, + monitor: monitor, + policyId: policyId, + status: proto.StateObserved_STARTING, + reporter: reporter, + policyF: dl.QueryLatestPolicies, + policiesIndex: dl.FleetPolicies, + enrollmentTokenF: findEnrollmentAPIKeys, + checkTime: DefaultCheckTime, } } @@ -73,16 +82,29 @@ func (m *selfMonitorT) Run(ctx context.Context) error { s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) - err := m.process(ctx) + _, err := m.process(ctx) if err != nil { return err } + cT := time.NewTimer(m.checkTime) + defer cT.Stop() + LOOP: for { select { case <-ctx.Done(): break LOOP + case <-cT.C: + status, err := m.process(ctx) + if err != nil { + return err + } + cT.Reset(m.checkTime) + if status == proto.StateObserved_HEALTHY { + // running; can stop + break LOOP + } case hits := <-s.Output(): policies := make([]model.Policy, len(hits)) for i, hit := range hits { @@ -91,9 +113,14 @@ LOOP: return err } } - if err := m.processPolicies(ctx, policies); err != nil { + status, err := m.processPolicies(ctx, policies) + if err != nil { return err } + if status == proto.StateObserved_HEALTHY { + // running; can stop + break LOOP + } } } @@ -106,28 +133,24 @@ func (m *selfMonitorT) Status() proto.StateObserved_Status { return m.status } -func (m *selfMonitorT) process(ctx context.Context) error { +func (m *selfMonitorT) process(ctx context.Context) (proto.StateObserved_Status, error) { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { - elasticErr, ok := err.(*es.ErrElastic) - if !ok { - return err - } - if elasticErr.Status != http.StatusNotFound { - return err + if !errors.Is(err, es.ErrIndexNotFound) { + return proto.StateObserved_FAILED, nil } + m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) } if len(policies) == 0 { - m.updateStatus() - return nil + return m.updateStatus(ctx) } return m.processPolicies(ctx, policies) } -func (m *selfMonitorT) processPolicies(ctx context.Context, policies []model.Policy) error { +func (m *selfMonitorT) processPolicies(ctx context.Context, policies []model.Policy) (proto.StateObserved_Status, error) { if len(policies) == 0 { // nothing to do - return nil + return proto.StateObserved_STARTING, nil } latest := m.groupByLatest(policies) for _, policy := range latest { @@ -139,7 +162,7 @@ func (m *selfMonitorT) processPolicies(ctx context.Context, policies []model.Pol 
break } } - return m.updateStatus() + return m.updateStatus(ctx) } func (m *selfMonitorT) groupByLatest(policies []model.Policy) map[string]model.Policy { @@ -160,7 +183,7 @@ func (m *selfMonitorT) groupByLatest(policies []model.Policy) map[string]model.P return latest } -func (m *selfMonitorT) updateStatus() error { +func (m *selfMonitorT) updateStatus(ctx context.Context) (proto.StateObserved_Status, error) { m.mut.Lock() defer m.mut.Unlock() @@ -172,31 +195,52 @@ func (m *selfMonitorT) updateStatus() error { } else { m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", m.policyId), nil) } - return nil + return proto.StateObserved_STARTING, nil } var data policyData err := json.Unmarshal(m.policy.Data, &data) if err != nil { - return err + return proto.StateObserved_FAILED, err } if !data.HasType("fleet-server") { - return errors.New("assigned policy does not have fleet-server input") + return proto.StateObserved_FAILED, errors.New("assigned policy does not have fleet-server input") } status := proto.StateObserved_HEALTHY extendMsg := "" + var payload map[string]interface{} if m.fleet.Agent.ID == "" { status = proto.StateObserved_DEGRADED extendMsg = "; missing config fleet.agent.id" + + // Elastic Agent has not been enrolled; Fleet Server passes back the enrollment token so the Elastic Agent + // can perform enrollment. + tokens, err := m.enrollmentTokenF(ctx, m.bulker, m.policy.PolicyId) + if err != nil { + return proto.StateObserved_FAILED, err + } + tokens = filterActiveTokens(tokens) + if len(tokens) == 0 { + // no tokens created for the policy, still starting + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on active enrollment keys to be created in default policy with Fleet Server integration", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on active enrollment keys to be created in policy with Fleet Server integration: %s", m.policyId), nil) + } + return proto.StateObserved_STARTING, nil + } + payload = map[string]interface{}{ + "enrollment_token": tokens[0].ApiKey, + } } m.status = status if m.policyId == "" { - m.reporter.Status(status, fmt.Sprintf("Running on default policy with Fleet Server integration%s", extendMsg), nil) + m.reporter.Status(status, fmt.Sprintf("Running on default policy with Fleet Server integration%s", extendMsg), payload) } else { - m.reporter.Status(status, fmt.Sprintf("Running on policy with Fleet Server integration: %s%s", m.policyId, extendMsg), nil) + m.reporter.Status(status, fmt.Sprintf("Running on policy with Fleet Server integration: %s%s", m.policyId, extendMsg), payload) } - return nil + return status, nil } type policyData struct { @@ -215,3 +259,17 @@ func (d *policyData) HasType(val string) bool { } return false } + +func findEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + return dl.FindEnrollmentAPIKeys(ctx, bulker, dl.QueryEnrollmentAPIKeyByPolicyID, dl.FieldPolicyId, policyID) +} + +func filterActiveTokens(tokens []model.EnrollmentApiKey) []model.EnrollmentApiKey { + active := make([]model.EnrollmentApiKey, 0, len(tokens)) + for _, t := range tokens { + if t.Active { + active = append(active, t) + } + } + return active +} diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go index 33df692dc..5311c4ac4 100644 --- a/internal/pkg/policy/self_test.go +++ b/internal/pkg/policy/self_test.go @@ -57,7 +57,7 @@ func 
TestSelfMonitor_DefaultPolicy(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_STARTING { - return fmt.Errorf("should be reported as starting") + return fmt.Errorf("should be reported as starting; instead its %s", status) } if msg != "Waiting on default policy with Fleet Server integration" { return fmt.Errorf("should be matching with default policy") @@ -106,7 +106,7 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_HEALTHY { - return fmt.Errorf("should be reported as healthy") + return fmt.Errorf("should be reported as healthy; instead its %s", status) } if msg != "Running on default policy with Fleet Server integration" { return fmt.Errorf("should be matching with default policy") @@ -135,8 +135,22 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { mm := mock.NewMockIndexMonitor() monitor := NewSelfMonitor(cfg, bulker, mm, "", reporter) sm := monitor.(*selfMonitorT) + sm.checkTime = 100 * time.Millisecond + + var policyLock sync.Mutex + var policyResult []model.Policy sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { - return []model.Policy{}, nil + policyLock.Lock() + defer policyLock.Unlock() + return policyResult, nil + } + + var tokenLock sync.Mutex + var tokenResult []model.EnrollmentApiKey + sm.enrollmentTokenF = func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + tokenLock.Lock() + defer tokenLock.Unlock() + return tokenResult, nil } var merr error @@ -151,7 +165,7 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_STARTING { - return fmt.Errorf("should be reported as starting") + return fmt.Errorf("should be reported as starting; instead its %s", status) } if msg != "Waiting on default policy with Fleet Server integration" { return fmt.Errorf("should be matching with default policy") @@ -185,6 +199,22 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { if err != nil { t.Fatal(err) } + + // add inactive token that should be filtered out + inactiveToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: false, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Inactive", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, inactiveToken) + tokenLock.Unlock() + go func() { mm.Notify(ctx, []es.HitT{ { @@ -194,17 +224,57 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { Source: policyData, }, }) + policyLock.Lock() + defer policyLock.Unlock() + policyResult = append(policyResult, policy) }() - // should now be set to healthy + // should be set to starting because of missing active enrollment keys ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on active enrollment keys to be created in default policy with Fleet Server integration" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + // add an 
active token + activeToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: true, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Active", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, activeToken) + tokenLock.Unlock() + + // should now be set to degraded + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, payload := reporter.Current() if status != proto.StateObserved_DEGRADED { - return fmt.Errorf("should be reported as healthy") + return fmt.Errorf("should be reported as degraded; instead its %s", status) } if msg != "Running on default policy with Fleet Server integration; missing config fleet.agent.id" { return fmt.Errorf("should be matching with default policy") } + if payload == nil { + return fmt.Errorf("payload should not be nil") + } + token, set := payload["enrollment_token"] + if !set { + return fmt.Errorf("payload should have enrollment-token set") + } + if token != activeToken.ApiKey { + return fmt.Errorf("enrollment_token value is incorrect") + } return nil }) @@ -246,7 +316,7 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_STARTING { - return fmt.Errorf("should be reported as starting") + return fmt.Errorf("should be reported as starting; instead its %s", status) } if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { return fmt.Errorf("should be matching with specific policy") @@ -294,7 +364,7 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_HEALTHY { - return fmt.Errorf("should be reported as healthy") + return fmt.Errorf("should be reported as healthy; instead its %s", status) } if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s", policyId) { return fmt.Errorf("should be matching with specific policy") @@ -324,8 +394,22 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { mm := mock.NewMockIndexMonitor() monitor := NewSelfMonitor(cfg, bulker, mm, policyId, reporter) sm := monitor.(*selfMonitorT) + sm.checkTime = 100 * time.Millisecond + + var policyLock sync.Mutex + var policyResult []model.Policy sm.policyF = func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) { - return []model.Policy{}, nil + policyLock.Lock() + defer policyLock.Unlock() + return policyResult, nil + } + + var tokenLock sync.Mutex + var tokenResult []model.EnrollmentApiKey + sm.enrollmentTokenF = func(ctx context.Context, bulker bulk.Bulk, policyID string) ([]model.EnrollmentApiKey, error) { + tokenLock.Lock() + defer tokenLock.Unlock() + return tokenResult, nil } var merr error @@ -340,7 +424,7 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() if status != proto.StateObserved_STARTING { - return fmt.Errorf("should be reported as starting") + return fmt.Errorf("should be reported as starting; instead its %s", status) } if msg != fmt.Sprintf("Waiting on policy with Fleet Server integration: %s", policyId) { return fmt.Errorf("should be matching with specific policy") @@ -373,6 +457,22 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { if err != nil { 
t.Fatal(err) } + + // add inactive token that should be filtered out + inactiveToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: false, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Inactive", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, inactiveToken) + tokenLock.Unlock() + go func() { mm.Notify(ctx, []es.HitT{ { @@ -382,17 +482,57 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { Source: policyData, }, }) + policyLock.Lock() + defer policyLock.Unlock() + policyResult = append(policyResult, policy) }() - // should now be set to healthy + // should be set to starting because of missing active enrollment keys ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on active enrollment keys to be created in policy with Fleet Server integration: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + // add an active token + activeToken := model.EnrollmentApiKey{ + ESDocument: model.ESDocument{ + Id: xid.New().String(), + }, + Active: true, + ApiKey: "d2JndlFIWUJJUVVxWDVia2NJTV86X0d6ZmljZGNTc1d4R1otbklrZFFRZw==", + ApiKeyId: xid.New().String(), + Name: "Active", + PolicyId: policyId, + } + tokenLock.Lock() + tokenResult = append(tokenResult, activeToken) + tokenLock.Unlock() + + // should now be set to degraded + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, payload := reporter.Current() if status != proto.StateObserved_DEGRADED { - return fmt.Errorf("should be reported as healthy") + return fmt.Errorf("should be reported as degraded; instead its %s", status) } if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s; missing config fleet.agent.id", policyId) { return fmt.Errorf("should be matching with specific policy") } + if payload == nil { + return fmt.Errorf("payload should not be nil") + } + token, set := payload["enrollment_token"] + if !set { + return fmt.Errorf("payload should have enrollment-token set") + } + if token != activeToken.ApiKey { + return fmt.Errorf("enrollment_token value is incorrect") + } return nil }) diff --git a/internal/pkg/status/reporter.go b/internal/pkg/status/reporter.go index eaa3bb405..d3f44fdad 100644 --- a/internal/pkg/status/reporter.go +++ b/internal/pkg/status/reporter.go @@ -25,10 +25,8 @@ func NewLog() *Log { } // Status triggers updating the status. 
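// A minimal sketch of a reporter that consumes the enrollment_token payload
// produced by the self monitor above, assuming the same Status signature as
// the Log reporter below and the sync, proto, and zerolog log imports already
// used in this package. The enrollmentAwareReporter name is illustrative.
type enrollmentAwareReporter struct {
	mut   sync.Mutex
	token string
}

func (r *enrollmentAwareReporter) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error {
	r.mut.Lock()
	defer r.mut.Unlock()
	// The self monitor only attaches a payload while fleet.agent.id is unset;
	// keep the most recent token so the caller can complete enrollment.
	if t, ok := payload["enrollment_token"].(string); ok {
		r.token = t
	}
	log.Info().Str("status", status.String()).Msg(message)
	return nil
}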
-func (l *Log) Status(status proto.StateObserved_Status, message string, payload map[string]interface{}) error { - log.Info().Str("status", status.String()).Fields(map[string]interface{}{ - "payload": payload, - }).Msg(message) +func (l *Log) Status(status proto.StateObserved_Status, message string, _ map[string]interface{}) error { + log.Info().Str("status", status.String()).Msg(message) return nil } From d3d68cc13c4b60b31480be8ef071954b89e6d945 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 8 Apr 2021 09:39:52 +0000 Subject: [PATCH 050/240] Support basic ECS logging fields (#211) (cherry picked from commit bb86529dd557942988114b6dd3e60929124d60e2) Co-authored-by: Sean Cunningham --- internal/pkg/logger/logger.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index 292875935..35a1319be 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -64,6 +64,7 @@ func (l *Logger) Sync() { func Init(cfg *config.Config) (*Logger, error) { var err error once.Do(func() { + var l zerolog.Logger var w WriterSync l, w, err = configure(cfg) @@ -79,6 +80,11 @@ func Init(cfg *config.Config) (*Logger, error) { zerolog.TimeFieldFormat = time.StampMicro + // override the field names for ECS + zerolog.TimestampFieldName = "@timestamp" + zerolog.LevelFieldName = "log.level" + zerolog.MessageFieldName = "message" + log.Info(). Int("pid", os.Getpid()). Int("ppid", os.Getppid()). From 8ad1ef48a71b04e694ef7cecab47fec947f4f571 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 8 Apr 2021 14:12:20 +0000 Subject: [PATCH 051/240] Structured logger outputs RFC3339 at millisecond resolution in zulu timezone. Pretty logger output local time in milliseconds. (#214) (cherry picked from commit 0459eeefa8ec3467056de164d42359860d458e5a) Co-authored-by: Sean Cunningham --- fleet-server.yml | 1 - internal/pkg/logger/logger.go | 10 ++++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/fleet-server.yml b/fleet-server.yml index df37e8d86..30dcf435f 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -52,7 +52,6 @@ runtime: logging: to_stderr: true # Force the logging output to stderr - pretty: true # Output pretty logging in stderr mode #level: trace # Enables the stats endpoint under http://localhost:5601 by default. diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index 35a1319be..be6f0d7c5 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -78,12 +78,14 @@ func Init(cfg *config.Config) (*Logger, error) { sync: w, } - zerolog.TimeFieldFormat = time.StampMicro - // override the field names for ECS - zerolog.TimestampFieldName = "@timestamp" zerolog.LevelFieldName = "log.level" zerolog.MessageFieldName = "message" + zerolog.TimeFieldFormat = "2006-01-02T15:04:05.999Z" // RFC3339 at millisecond resolution in zulu timezone + zerolog.TimestampFieldName = "@timestamp" + if !cfg.Logging.Pretty || !cfg.Logging.ToStderr { + zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } + } log.Info(). Int("pid", os.Getpid()). 
@@ -126,7 +128,7 @@ func configure(cfg *config.Config) (zerolog.Logger, WriterSync, error) { if cfg.Logging.ToStderr { out := io.Writer(os.Stderr) if cfg.Logging.Pretty { - out = zerolog.ConsoleWriter{Out: os.Stderr} + out = zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05.000"} } return log.Output(out).Level(level(cfg)), os.Stderr, nil } From 8cd2578742d693dec007a8b3443bc24df17d7489 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 9 Apr 2021 18:24:09 +0000 Subject: [PATCH 052/240] Preprocess policy on new revision to pre-calculate hashes and minimize memory allocations. (#216) (cherry picked from commit fdb97bcf4c1a5e88eec4f142bda4df2d9c1292c4) Co-authored-by: Sean Cunningham --- cmd/fleet/bulkCheckin.go | 2 +- cmd/fleet/handleCheckin.go | 177 +++--- cmd/fleet/handleChecking_test.go | 2 + cmd/fleet/schema.go | 12 +- internal/pkg/dl/agent.go | 15 - internal/pkg/dl/constants.go | 15 +- internal/pkg/policy/monitor.go | 81 +-- .../pkg/policy/monitor_integration_test.go | 2 +- internal/pkg/policy/monitor_test.go | 4 +- internal/pkg/policy/output_permissions.go | 4 +- internal/pkg/policy/parsed_policy.go | 82 +++ internal/pkg/policy/parsed_policy_test.go | 507 ++++++++++++++++++ 12 files changed, 768 insertions(+), 135 deletions(-) create mode 100644 internal/pkg/policy/parsed_policy.go create mode 100644 internal/pkg/policy/parsed_policy_test.go diff --git a/cmd/fleet/bulkCheckin.go b/cmd/fleet/bulkCheckin.go index eeace0549..adb4916cd 100644 --- a/cmd/fleet/bulkCheckin.go +++ b/cmd/fleet/bulkCheckin.go @@ -114,7 +114,7 @@ func (bc *BulkCheckin) flush(ctx context.Context) error { } err := bc.bulker.MUpdate(ctx, updates, bulk.WithRefresh()) - log.Debug(). + log.Trace(). Err(err). Dur("rtt", time.Since(start)). Int("cnt", len(updates)). diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 6a571babc..c8ba09b39 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -33,7 +33,10 @@ import ( ) var ( - ErrAgentNotFound = errors.New("agent not found") + ErrAgentNotFound = errors.New("agent not found") + ErrNoOutputPerms = errors.New("output permission sections not found") + ErrNoPolicyOutput = errors.New("output section not found") + ErrFailInjectApiKey = errors.New("fail inject api key") ) const kEncodingGzip = "gzip" @@ -202,14 +205,14 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st actions = append(actions, acs...) break LOOP case policy := <-sub.Output(): - actionResp, err := parsePolicy(ctx, bulker, agent.Id, policy) + actionResp, err := processPolicy(ctx, bulker, agent.Id, policy) if err != nil { return err } actions = append(actions, *actionResp) break LOOP case <-longPoll.C: - log.Trace().Msg("Fire long poll") + log.Trace().Msg("fire long poll") break LOOP case <-tick.C: ct.bc.CheckIn(agent.Id, nil, seqno) @@ -217,7 +220,6 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st } } - // For now, empty response resp := CheckinResponse{ AckToken: ackToken, Action: "checkin", @@ -256,7 +258,7 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C Err(err). Int("dataSz", len(payload)). Int("lvl", compressionLevel). 
- Msg("Compressing checkin response") + Msg("compressing checkin response") } else { _, err = w.Write(payload) } @@ -315,7 +317,7 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin respList = append(respList, ActionResp{ AgentId: agentId, CreatedAt: action.Timestamp, - Data: []byte(action.Data), + Data: action.Data, Id: action.ActionId, Type: action.Type, InputType: action.InputType, @@ -329,105 +331,138 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin return respList, ackToken } -func parsePolicy(ctx context.Context, bulker bulk.Bulk, agentId string, p model.Policy) (*ActionResp, error) { - // Need to inject the default api key into the object. So: - // 1) Deserialize the action - // 2) Lookup the DefaultApiKey in the save agent (we purposefully didn't decode it before) - // 3) If not there, generate and persist DefaultAPIKey - // 4) Inject default api key into structure - // 5) Re-serialize and return AgentResp structure - - // using json.RawMessage to avoid the full json de-serialization - var actionObj map[string]json.RawMessage - if err := json.Unmarshal(p.Data, &actionObj); err != nil { - return nil, err +// A new policy exists for this agent. Perform the following: +// - Generate and update default ApiKey if roles have changed. +// - Rewrite the policy for delivery to the agent injecting the key material. +// +func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId string, pp *policy.ParsedPolicy) (*ActionResp, error) { + + zlog := log.With(). + Str("ctx", "processPolicy"). + Str("agentId", agentId). + Str("policyId", pp.Policy.PolicyId). + Logger() + + // The parsed policy object contains a map of name->role with a precalculated sha2. + defaultRole, ok := pp.Roles[policy.DefaultOutputName] + if !ok { + zlog.Error().Str("name", policy.DefaultOutputName).Msg("policy does not contain required output permission section") + return nil, ErrNoOutputPerms } - // Repull and decode the agent object - var agent model.Agent + // Repull and decode the agent object. Do not trust the cache. agent, err := dl.FindAgent(ctx, bulker, dl.QueryAgentByID, dl.FieldId, agentId) if err != nil { + zlog.Error().Err(err).Msg("fail find agent record") return nil, err } - // Check if need to generate a new output api key - var ( - hash string - needKey bool - roles []byte - ) + // Determine whether we need to generate a default output ApiKey. + // This is accomplished by comparing the sha2 hash stored in the agent + // record with the precalculated sha2 hash of the role. + needKey := true + switch { + case agent.DefaultApiKey == "": + zlog.Debug().Msg("must generate api key as default API key is not present") + case defaultRole.Sha2 != agent.PolicyOutputPermissionsHash: + zlog.Debug().Msg("must generate api key as policy output permissions changed") + default: + needKey = false + zlog.Debug().Msg("policy output permissions are the same") + } - if agent.DefaultApiKey == "" { - hash, roles, err = policy.GetRoleDescriptors(actionObj[policy.OutputPermissionsProperty]) - if err != nil { - return nil, err - } - needKey = true - log.Debug().Str("agentId", agentId).Msg("agent API key is not present") - } else { - hash, roles, needKey, err = policy.CheckOutputPermissionsChanged(agent.PolicyOutputPermissionsHash, actionObj[policy.OutputPermissionsProperty]) + if needKey { + zlog.Debug(). + RawJSON("roles", defaultRole.Raw). + Str("oldHash", agent.PolicyOutputPermissionsHash). + Str("newHash", defaultRole.Sha2). 
+ Msg("Generating a new API key") + + defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, policy.DefaultOutputName, defaultRole.Raw) if err != nil { + zlog.Error().Err(err).Msg("fail generate output key") return nil, err } - if needKey { - log.Debug().Str("agentId", agentId).Msg("policy output permissions changed") - } else { - log.Debug().Str("agentId", agentId).Msg("policy output permissions are the same") + + zlog.Info(). + Str("hash", defaultRole.Sha2). + Str("apiKeyId", defaultOutputApiKey.Id). + Msg("Updating agent record to pick up default output key.") + + fields := map[string]interface{}{ + dl.FieldDefaultApiKey: defaultOutputApiKey.Agent(), + dl.FieldDefaultApiKeyId: defaultOutputApiKey.Id, + dl.FieldPolicyOutputPermissionsHash: defaultRole.Sha2, } - } - if needKey { - log.Debug().Str("agentId", agentId).RawJSON("roles", roles).Str("hash", hash).Msg("generating a new API key") - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, policy.DefaultOutputName, roles) + body, err := json.Marshal(map[string]interface{}{ + "doc": fields, + }) if err != nil { return nil, err } - agent.DefaultApiKey = defaultOutputApiKey.Agent() - agent.DefaultApiKeyId = defaultOutputApiKey.Id - agent.PolicyOutputPermissionsHash = hash - log.Info().Str("agentId", agentId).Msg("rewriting full agent record to pick up default output key.") - if err = dl.IndexAgent(ctx, bulker, agent); err != nil { + if err = bulker.Update(ctx, dl.FleetAgents, agent.Id, body); err != nil { + zlog.Error().Err(err).Msg("fail update agent record") return nil, err } } + rewrittenPolicy, err := rewritePolicy(pp, agent.DefaultApiKey) + if err != nil { + zlog.Error().Err(err).Msg("fail rewrite policy") + return nil, err + } + + r := policy.RevisionFromPolicy(pp.Policy) + resp := ActionResp{ + AgentId: agent.Id, + CreatedAt: pp.Policy.Timestamp, + Data: rewrittenPolicy, + Id: r.String(), + Type: TypePolicyChange, + } + + return &resp, nil +} + +// Return Serializable policy injecting the apikey into the output field. +// This avoids reallocation of each section of the policy by duping +// the map object and only replacing the targeted section. 
+func rewritePolicy(pp *policy.ParsedPolicy, apiKey string) (interface{}, error) { + // Parse the outputs maps in order to inject the api key const outputsProperty = "outputs" - outputs, err := smap.Parse(actionObj[outputsProperty]) + outputs, err := smap.Parse(pp.Fields[outputsProperty]) if err != nil { return nil, err } - if outputs != nil { - if ok := setMapObj(outputs, agent.DefaultApiKey, "default", "api_key"); !ok { - log.Debug().Msg("cannot inject api_key into policy") - } else { - outputRaw, err := json.Marshal(outputs) - if err != nil { - return nil, err - } - actionObj[outputsProperty] = json.RawMessage(outputRaw) - } + if outputs == nil { + return nil, ErrNoPolicyOutput } - dataJSON, err := json.Marshal(struct { - Policy map[string]json.RawMessage `json:"policy"` - }{actionObj}) + if ok := setMapObj(outputs, apiKey, "default", "api_key"); !ok { + return nil, ErrFailInjectApiKey + } + + outputRaw, err := json.Marshal(outputs) if err != nil { return nil, err } - r := policy.RevisionFromPolicy(p) - resp := ActionResp{ - AgentId: agent.Id, - CreatedAt: p.Timestamp, - Data: dataJSON, - Id: r.String(), - Type: TypePolicyChange, + // Dupe field map; pp is immutable + fields := make(map[string]json.RawMessage, len(pp.Fields)) + + for k, v := range pp.Fields { + fields[k] = v } - return &resp, nil + fields[outputsProperty] = json.RawMessage(outputRaw) + + return struct { + Policy map[string]json.RawMessage `json:"policy"` + }{fields}, nil } func setMapObj(obj map[string]interface{}, val interface{}, keys ...string) bool { @@ -483,7 +518,7 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err erro } if reqLocalMeta != nil && !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { - log.Trace().RawJSON("oldLocalMeta", agent.LocalMetadata).RawJSON("newLocalMeta", req.LocalMeta).Msg("Local metadata not equal") + log.Trace().RawJSON("oldLocalMeta", agent.LocalMetadata).RawJSON("newLocalMeta", req.LocalMeta).Msg("local metadata not equal") log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("applying new local metadata") fields = map[string]interface{}{ FieldLocalMetadata: req.LocalMeta, diff --git a/cmd/fleet/handleChecking_test.go b/cmd/fleet/handleChecking_test.go index 046ad0cee..ea199c81a 100644 --- a/cmd/fleet/handleChecking_test.go +++ b/cmd/fleet/handleChecking_test.go @@ -5,6 +5,7 @@ package fleet import ( + "encoding/json" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/stretchr/testify/assert" "testing" @@ -27,6 +28,7 @@ func TestConvertActions(t *testing.T) { { AgentId: "agent-id", Id: "1234", + Data: json.RawMessage(nil), }, }) assert.Equal(t, token, "") diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index e3f6fe5f3..7ad4d29ff 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -96,12 +96,12 @@ type AckResponse struct { } type ActionResp struct { - AgentId string `json:"agent_id"` - CreatedAt string `json:"created_at"` - Data json.RawMessage `json:"data"` - Id string `json:"id"` - Type string `json:"type"` - InputType string `json:"input_type"` + AgentId string `json:"agent_id"` + CreatedAt string `json:"created_at"` + Data interface{} `json:"data"` + Id string `json:"id"` + Type string `json:"type"` + InputType string `json:"input_type"` } type Event struct { diff --git a/internal/pkg/dl/agent.go b/internal/pkg/dl/agent.go index 998dfc340..fd802d197 100644 --- a/internal/pkg/dl/agent.go +++ b/internal/pkg/dl/agent.go @@ -6,13 +6,10 @@ package dl import ( "context" - "encoding/json" 
"github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/model" - - "github.com/gofrs/uuid" ) const ( @@ -49,15 +46,3 @@ func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name strin err = res.Hits[0].Unmarshal(&agent) return agent, err } - -func IndexAgent(ctx context.Context, bulker bulk.Bulk, agent model.Agent) error { - if agent.Id == "" { - agent.Id = uuid.Must(uuid.NewV4()).String() - } - body, err := json.Marshal(agent) - if err != nil { - return err - } - _, err = bulker.Index(ctx, FleetAgents, agent.Id, body, bulk.WithRefresh()) - return err -} diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 5de4c2961..f2ad891f5 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -27,12 +27,15 @@ const ( FieldMaxSeqNo = "max_seq_no" FieldActionSeqNo = "action_seq_no" - FieldActionId = "action_id" - FieldPolicyId = "policy_id" - FieldRevisionIdx = "revision_idx" - FieldCoordinatorIdx = "coordinator_idx" - FieldPolicyRevisionIdx = "policy_revision_idx" - FieldPolicyCoordinatorIdx = "policy_coordinator_idx" + FieldActionId = "action_id" + FieldPolicyId = "policy_id" + FieldRevisionIdx = "revision_idx" + FieldCoordinatorIdx = "coordinator_idx" + FieldPolicyRevisionIdx = "policy_revision_idx" + FieldPolicyCoordinatorIdx = "policy_coordinator_idx" + FieldDefaultApiKey = "default_api_key" + FieldDefaultApiKeyId = "default_api_key_id" + FieldPolicyOutputPermissionsHash = "policy_output_permissions_hash" FieldActive = "active" FieldUpdatedAt = "updated_at" diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index e647bb612..b4eeb170f 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -27,7 +27,7 @@ var gCounter uint64 type Subscription interface { // Output returns a new policy that needs to be sent based on the current subscription. - Output() <-chan model.Policy + Output() <-chan *ParsedPolicy } type Monitor interface { @@ -50,12 +50,12 @@ type subT struct { revIdx int64 coordIdx int64 - c chan model.Policy + c chan *ParsedPolicy } type policyT struct { - policy model.Policy - subs map[uint64]subT // map sub counter to channel + pp ParsedPolicy + subs map[uint64]subT // map sub counter to channel } type monitorT struct { @@ -74,7 +74,7 @@ type monitorT struct { } // Output returns a new policy that needs to be sent based on the current subscription. -func (s *subT) Output() <-chan model.Policy { +func (s *subT) Output() <-chan *ParsedPolicy { return s.c } @@ -177,7 +177,12 @@ func (m *monitorT) groupByLatest(policies []model.Policy) map[string]model.Polic func (m *monitorT) rollout(ctx context.Context, policy model.Policy) error { zlog := m.log.With().Str("policyId", policy.PolicyId).Logger() - subs := m.updatePolicy(policy) + pp, err := NewParsedPolicy(policy) + if err != nil { + return err + } + + subs := m.updatePolicy(pp) if subs == nil { return nil } @@ -206,7 +211,6 @@ func (m *monitorT) rollout(ctx context.Context, policy model.Policy) error { Dur("throttle", m.throttle). Msg("policy rollout begin") - var err error LOOP: for _, s := range subs { @@ -220,7 +224,7 @@ LOOP: } select { - case s.c <- policy: + case s.c <- pp: default: // Should never block on a channel; we created a channel of size one. // A block here indicates a logic error somewheres. 
@@ -239,40 +243,51 @@ LOOP: return err } -func (m *monitorT) updatePolicy(policy model.Policy) []subT { +func (m *monitorT) updatePolicy(pp *ParsedPolicy) []subT { m.mut.Lock() defer m.mut.Unlock() - p, ok := m.policies[policy.PolicyId] + newPolicy := pp.Policy + + p, ok := m.policies[newPolicy.PolicyId] if !ok { p = policyT{ - policy: policy, - subs: make(map[uint64]subT), + pp: *pp, + subs: make(map[uint64]subT), } - m.policies[policy.PolicyId] = p + m.policies[newPolicy.PolicyId] = p + m.log.Info(). + Str("policyId", newPolicy.PolicyId). + Int64("rev", newPolicy.RevisionIdx). + Int64("coord", newPolicy.CoordinatorIdx). + Msg("new policy") return nil } - p.policy = policy - m.policies[policy.PolicyId] = p + oldPolicy := p.pp.Policy - if policy.CoordinatorIdx <= 0 { - // don't rollout new policy that has not passed through the coordinator - return nil - } + p.pp = *pp + m.policies[newPolicy.PolicyId] = p m.log.Info(). - Str("policyId", policy.PolicyId). - Int64("orev", p.policy.RevisionIdx). - Int64("nrev", policy.RevisionIdx). - Int64("ocoord", p.policy.CoordinatorIdx). - Int64("ncoord", policy.CoordinatorIdx). - Msg("new policy") + Str("policyId", newPolicy.PolicyId). + Int64("orev", oldPolicy.RevisionIdx). + Int64("nrev", newPolicy.RevisionIdx). + Int64("ocoord", oldPolicy.CoordinatorIdx). + Int64("ncoord", newPolicy.CoordinatorIdx). + Msg("policy revised") + + if newPolicy.CoordinatorIdx <= 0 { + m.log.Info(). + Str("policyId", newPolicy.PolicyId). + Msg("Do not roll out policy that has not pass through coordinator") + return nil + } subs := make([]subT, 0, len(p.subs)) for idx, sub := range p.subs { - if p.policy.RevisionIdx > sub.revIdx || - (p.policy.RevisionIdx == sub.revIdx && p.policy.CoordinatorIdx > sub.coordIdx) { + if newPolicy.RevisionIdx > sub.revIdx || + (newPolicy.RevisionIdx == sub.revIdx && newPolicy.CoordinatorIdx > sub.coordIdx) { // These subscriptions are one shot; delete from map. 
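// An illustrative restatement of the comparison used above: a subscriber is
// woken only when the incoming (revision, coordinator) pair is strictly newer
// than the pair it subscribed with. isNewer is not part of the monitor.
func isNewer(newRev, newCoord, subRev, subCoord int64) bool {
	if newRev != subRev {
		return newRev > subRev
	}
	return newCoord > subCoord
}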
delete(p.subs, idx) subs = append(subs, sub) @@ -308,16 +323,20 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, policyId: policyId, revIdx: revisionIdx, coordIdx: coordinatorIdx, - c: make(chan model.Policy, 1), + c: make(chan *ParsedPolicy, 1), } m.mut.Lock() p, ok := m.policies[policyId] - if (p.policy.RevisionIdx > revisionIdx && p.policy.CoordinatorIdx > 0) || - (p.policy.RevisionIdx == revisionIdx && p.policy.CoordinatorIdx > coordinatorIdx) { + + pRevIdx := p.pp.Policy.RevisionIdx + pCoordIdx := p.pp.Policy.CoordinatorIdx + + if (pRevIdx > revisionIdx && pCoordIdx > 0) || + (pRevIdx == revisionIdx && pCoordIdx > coordinatorIdx) { // fill the channel, clear out id; no point putting it in map as it is already fired s.idx = 0 - s.c <- p.policy + s.c <- &p.pp } else { if !ok { p = policyT{subs: make(map[uint64]subT)} diff --git a/internal/pkg/policy/monitor_integration_test.go b/internal/pkg/policy/monitor_integration_test.go index 202cff9e3..5983b8ab3 100644 --- a/internal/pkg/policy/monitor_integration_test.go +++ b/internal/pkg/policy/monitor_integration_test.go @@ -92,7 +92,7 @@ func TestMonitor_Integration(t *testing.T) { select { case subPolicy := <-s.Output(): tm.Stop() - if subPolicy.PolicyId != policyId && subPolicy.RevisionIdx != 1 && subPolicy.CoordinatorIdx != 1 { + if subPolicy.Policy.PolicyId != policyId && subPolicy.Policy.RevisionIdx != 1 && subPolicy.Policy.CoordinatorIdx != 1 { t.Fatal("failed to get the expected updated policy") } case <-tm.C: diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index 0f21c2491..b67bd45e4 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -85,7 +85,7 @@ func TestMonitor_NewPolicy(t *testing.T) { select { case subPolicy := <-s.Output(): tm.Stop() - diff := cmp.Diff(policy, subPolicy) + diff := cmp.Diff(policy, subPolicy.Policy) if diff != "" { t.Fatal(diff) } @@ -317,7 +317,7 @@ func runTestMonitor_NewPolicyExists(t *testing.T, delay time.Duration) { select { case subPolicy := <-s.Output(): tm.Stop() - diff := cmp.Diff(policy, subPolicy) + diff := cmp.Diff(policy, subPolicy.Policy) if diff != "" { t.Fatal(diff) } diff --git a/internal/pkg/policy/output_permissions.go b/internal/pkg/policy/output_permissions.go index 465de52f9..799e73cce 100644 --- a/internal/pkg/policy/output_permissions.go +++ b/internal/pkg/policy/output_permissions.go @@ -14,8 +14,8 @@ import ( ) const ( - DefaultOutputName = "default" - OutputPermissionsProperty = "output_permissions" + DefaultOutputName = "default" + FieldOutputPermissions = "output_permissions" ) var ( diff --git a/internal/pkg/policy/parsed_policy.go b/internal/pkg/policy/parsed_policy.go new file mode 100644 index 000000000..96429124c --- /dev/null +++ b/internal/pkg/policy/parsed_policy.go @@ -0,0 +1,82 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
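// A minimal usage sketch for the ParsedPolicy type defined below, assuming the
// errors import: the policy document is parsed once per revision by the
// monitor, and the per-checkin work in processPolicy reduces to a map lookup
// plus comparing the precalculated Sha2 against the hash stored on the agent
// record. needNewKey is an illustrative helper, not part of this package.
func needNewKey(pp *ParsedPolicy, storedHash, currentKey string) (RoleT, bool, error) {
	role, ok := pp.Roles[DefaultOutputName]
	if !ok {
		return RoleT{}, false, errors.New("policy has no default output permissions")
	}
	// A new output API key is needed when none was issued yet or the output
	// permissions changed since the last key was generated.
	return role, currentKey == "" || role.Sha2 != storedHash, nil
}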
+ +package policy + +import ( + "encoding/json" + + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/smap" +) + +type RoleT struct { + Raw []byte + Sha2 string +} + +type RoleMapT map[string]RoleT + +type ParsedPolicy struct { + Policy model.Policy + Fields map[string]json.RawMessage + Roles RoleMapT +} + +func NewParsedPolicy(p model.Policy) (*ParsedPolicy, error) { + var err error + + var fields map[string]json.RawMessage + if err = json.Unmarshal(p.Data, &fields); err != nil { + return nil, err + } + + // Interpret the output permissions if available + var roles map[string]RoleT + if perms := fields[FieldOutputPermissions]; len(perms) != 0 { + if roles, err = parsePerms(perms); err != nil { + return nil, err + } + } + + // We are cool and the gang + pp := &ParsedPolicy{ + Policy: p, + Fields: fields, + Roles: roles, + } + + return pp, nil +} + +func parsePerms(permsRaw json.RawMessage) (RoleMapT, error) { + permMap, err := smap.Parse(permsRaw) + if err != nil { + return nil, err + } + + // iterate across the keys + m := make(RoleMapT, len(permMap)) + for k := range permMap { + + v := permMap.GetMap(k) + + if v != nil { + var r RoleT + + // Stable hash on permissions payload + if r.Sha2, err = v.Hash(); err != nil { + return nil, err + } + + // Re-marshal, the payload for each section + if r.Raw, err = json.Marshal(v); err != nil { + return nil, err + } + m[k] = r + } + } + + return m, nil +} diff --git a/internal/pkg/policy/parsed_policy_test.go b/internal/pkg/policy/parsed_policy_test.go new file mode 100644 index 000000000..f15f118e5 --- /dev/null +++ b/internal/pkg/policy/parsed_policy_test.go @@ -0,0 +1,507 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
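// The testPolicy constant below (and the minified constant, which appears to
// repeat it in compact form) exercises parsing of a realistic policy document.
// A minimal sketch of why a permissions hash can be stable across such
// formatting differences, assuming encoding/json, crypto/sha256, and
// encoding/hex; the real hashing is done by the smap package, so stableHash is
// illustrative only.
func stableHash(raw json.RawMessage) (string, error) {
	var v interface{}
	if err := json.Unmarshal(raw, &v); err != nil {
		return "", err
	}
	// encoding/json writes object keys in sorted order, so re-marshalling the
	// decoded value yields the same bytes for the same logical document
	// regardless of whitespace or original key order.
	canon, err := json.Marshal(v)
	if err != nil {
		return "", err
	}
	sum := sha256.Sum256(canon)
	return hex.EncodeToString(sum[:]), nil
}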
+ +package policy + +import ( + "encoding/json" + "fmt" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + "testing" +) + +const testPolicy = ` +{ + "id": "63f4e6d0-9626-11eb-b486-6de1529a4151", + "revision": 33, + "outputs": { + "default": { + "type": "elasticsearch", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ] + } + }, + "output_permissions": { + "default": { + "_fallback": { + "cluster": [ + "monitor" + ], + "indices": [ + { + "names": [ + "logs-*", + "metrics-*", + "traces-*", + ".logs-endpoint.diagnostic.collection-*" + ], + "privileges": [ + "auto_configure", + "create_doc" + ] + } + ] + } + } + }, + "agent": { + "monitoring": { + "enabled": true, + "use_output": "default", + "logs": true, + "metrics": true + } + }, + "inputs": [ + { + "id": "278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "name": "system-1", + "revision": 2, + "type": "logfile", + "use_output": "default", + "meta": { + "package": { + "name": "system", + "version": "0.11.2" + } + }, + "data_stream": { + "namespace": "default" + }, + "streams": [ + { + "id": "logfile-system.auth-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.auth", + "type": "logs" + }, + "exclude_files": [ + ".gz$" + ], + "paths": [ + "/var/log/auth.log*", + "/var/log/secure*" + ], + "multiline": { + "pattern": "^\\s", + "match": "after" + }, + "processors": [ + { + "add_locale": null + }, + { + "add_fields": { + "fields": { + "ecs.version": "1.8.0" + }, + "target": "" + } + } + ] + }, + { + "id": "logfile-system.syslog-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.syslog", + "type": "logs" + }, + "exclude_files": [ + ".gz$" + ], + "paths": [ + "/var/log/messages*", + "/var/log/syslog*" + ], + "multiline": { + "pattern": "^\\s", + "match": "after" + }, + "processors": [ + { + "add_locale": null + }, + { + "add_fields": { + "fields": { + "ecs.version": "1.5.0" + }, + "target": "" + } + } + ] + } + ] + }, + { + "id": "278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "name": "system-1", + "revision": 2, + "type": "system/metrics", + "use_output": "default", + "meta": { + "package": { + "name": "system", + "version": "0.11.2" + } + }, + "data_stream": { + "namespace": "default" + }, + "streams": [ + { + "id": "system/metrics-system.cpu-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.cpu", + "type": "metrics" + }, + "period": "10s", + "cpu.metrics": [ + "percentages", + "normalized_percentages" + ], + "metricsets": [ + "cpu" + ] + }, + { + "id": "system/metrics-system.diskio-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.diskio", + "type": "metrics" + }, + "period": "10s", + "diskio.include_devices": null, + "metricsets": [ + "diskio" + ] + }, + { + "id": "system/metrics-system.filesystem-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.filesystem", + "type": "metrics" + }, + "period": "1m", + "metricsets": [ + "filesystem" + ], + "processors": [ + { + "drop_event.when.regexp": { + "system.filesystem.mount_point": "^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)" + } + } + ] + }, + { + "id": "system/metrics-system.fsstat-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.fsstat", + "type": "metrics" + }, + "period": "1m", + "metricsets": [ + "fsstat" + ], + "processors": [ + { + "drop_event.when.regexp": { + "system.fsstat.mount_point": "^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)" + } + } + ] + }, + { + "id": 
"system/metrics-system.load-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.load", + "type": "metrics" + }, + "condition": "${host.platform} != 'windows'", + "period": "10s", + "metricsets": [ + "load" + ] + }, + { + "id": "system/metrics-system.memory-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.memory", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "memory" + ] + }, + { + "id": "system/metrics-system.network-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.network", + "type": "metrics" + }, + "period": "10s", + "network.interfaces": null, + "metricsets": [ + "network" + ] + }, + { + "id": "system/metrics-system.process-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.process", + "type": "metrics" + }, + "process.include_top_n.by_memory": 5, + "period": "10s", + "processes": [ + ".*" + ], + "process.include_top_n.by_cpu": 5, + "process.cgroups.enabled": false, + "process.cmdline.cache.enabled": true, + "metricsets": [ + "process" + ], + "process.include_cpu_ticks": false + }, + { + "id": "system/metrics-system.process_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.process_summary", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "process_summary" + ] + }, + { + "id": "system/metrics-system.socket_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.socket_summary", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "socket_summary" + ] + }, + { + "id": "system/metrics-system.uptime-278c54f2-f62c-4efd-b4f8-50d14c4ee337", + "data_stream": { + "dataset": "system.uptime", + "type": "metrics" + }, + "period": "10s", + "metricsets": [ + "uptime" + ] + } + ] + }, + { + "id": "74abb3e2-a041-4684-8b3d-09e0e5eacd36", + "name": "Endgame", + "revision": 28, + "type": "endpoint", + "use_output": "default", + "meta": { + "package": { + "name": "endpoint", + "version": "0.18.0" + } + }, + "data_stream": { + "namespace": "default" + }, + "artifact_manifest": { + "schema_version": "v1", + "manifest_version": "1.0.28", + "artifacts": { + "endpoint-trustlist-windows-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-trustlist-windows-v1/74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf", + "compression_algorithm": "zlib", + "decoded_size": 338, + "decoded_sha256": "74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf", + "encryption_algorithm": "none", + "encoded_sha256": "8e70ce05d25709b6bbd4fd6981e86e24e1a2f85e3f69d2733058c568830f25d2", + "encoded_size": 185 + }, + "endpoint-trustlist-macos-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-trustlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-exceptionlist-macos-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-exceptionlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": 
"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-trustlist-linux-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-trustlist-linux-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + }, + "endpoint-exceptionlist-windows-v1": { + "relative_url": "/api/endpoint/artifacts/download/endpoint-exceptionlist-windows-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "compression_algorithm": "zlib", + "decoded_size": 14, + "decoded_sha256": "d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658", + "encryption_algorithm": "none", + "encoded_sha256": "f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda", + "encoded_size": 22 + } + } + }, + "policy": { + "linux": { + "logging": { + "file": "info" + }, + "events": { + "process": true, + "file": true, + "network": true + } + }, + "windows": { + "popup": { + "malware": { + "enabled": true, + "message": "" + }, + "ransomware": { + "enabled": true, + "message": "" + } + }, + "malware": { + "mode": "prevent" + }, + "logging": { + "file": "info" + }, + "antivirus_registration": { + "enabled": false + }, + "events": { + "registry": true, + "process": true, + "security": true, + "file": true, + "dns": true, + "dll_and_driver_load": true, + "network": true + }, + "ransomware": { + "mode": "prevent" + } + }, + "mac": { + "popup": { + "malware": { + "enabled": true, + "message": "" + } + }, + "malware": { + "mode": "prevent" + }, + "logging": { + "file": "info" + }, + "events": { + "process": true, + "file": true, + "network": true + } + } + } + } + ], + "fleet": { + "hosts": [ + "http://10.128.0.4:8220" + ] + } +} +` + +const minified = ` 
+{"id":"63f4e6d0-9626-11eb-b486-6de1529a4151","revision":33,"outputs":{"default":{"type":"elasticsearch","hosts":["https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443"]}},"output_permissions":{"default":{"_fallback":{"cluster":["monitor"],"indices":[{"names":["logs-*","metrics-*","traces-*",".logs-endpoint.diagnostic.collection-*"],"privileges":["auto_configure","create_doc"]}]}}},"agent":{"monitoring":{"enabled":true,"use_output":"default","logs":true,"metrics":true}},"inputs":[{"id":"278c54f2-f62c-4efd-b4f8-50d14c4ee337","name":"system-1","revision":2,"type":"logfile","use_output":"default","meta":{"package":{"name":"system","version":"0.11.2"}},"data_stream":{"namespace":"default"},"streams":[{"id":"logfile-system.auth-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.auth","type":"logs"},"exclude_files":[".gz$"],"paths":["/var/log/auth.log*","/var/log/secure*"],"multiline":{"pattern":"^\\s","match":"after"},"processors":[{"add_locale":null},{"add_fields":{"fields":{"ecs.version":"1.8.0"},"target":""}}]},{"id":"logfile-system.syslog-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.syslog","type":"logs"},"exclude_files":[".gz$"],"paths":["/var/log/messages*","/var/log/syslog*"],"multiline":{"pattern":"^\\s","match":"after"},"processors":[{"add_locale":null},{"add_fields":{"fields":{"ecs.version":"1.5.0"},"target":""}}]}]},{"id":"278c54f2-f62c-4efd-b4f8-50d14c4ee337","name":"system-1","revision":2,"type":"system/metrics","use_output":"default","meta":{"package":{"name":"system","version":"0.11.2"}},"data_stream":{"namespace":"default"},"streams":[{"id":"system/metrics-system.cpu-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.cpu","type":"metrics"},"period":"10s","cpu.metrics":["percentages","normalized_percentages"],"metricsets":["cpu"]},{"id":"system/metrics-system.diskio-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.diskio","type":"metrics"},"period":"10s","diskio.include_devices":null,"metricsets":["diskio"]},{"id":"system/metrics-system.filesystem-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.filesystem","type":"metrics"},"period":"1m","metricsets":["filesystem"],"processors":[{"drop_event.when.regexp":{"system.filesystem.mount_point":"^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)"}}]},{"id":"system/metrics-system.fsstat-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.fsstat","type":"metrics"},"period":"1m","metricsets":["fsstat"],"processors":[{"drop_event.when.regexp":{"system.fsstat.mount_point":"^/(sys|cgroup|proc|dev|etc|host|lib|snap)($|/)"}}]},{"id":"system/metrics-system.load-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.load","type":"metrics"},"condition":"${host.platform} != 
'windows'","period":"10s","metricsets":["load"]},{"id":"system/metrics-system.memory-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.memory","type":"metrics"},"period":"10s","metricsets":["memory"]},{"id":"system/metrics-system.network-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.network","type":"metrics"},"period":"10s","network.interfaces":null,"metricsets":["network"]},{"id":"system/metrics-system.process-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.process","type":"metrics"},"process.include_top_n.by_memory":5,"period":"10s","processes":[".*"],"process.include_top_n.by_cpu":5,"process.cgroups.enabled":false,"process.cmdline.cache.enabled":true,"metricsets":["process"],"process.include_cpu_ticks":false},{"id":"system/metrics-system.process_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.process_summary","type":"metrics"},"period":"10s","metricsets":["process_summary"]},{"id":"system/metrics-system.socket_summary-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.socket_summary","type":"metrics"},"period":"10s","metricsets":["socket_summary"]},{"id":"system/metrics-system.uptime-278c54f2-f62c-4efd-b4f8-50d14c4ee337","data_stream":{"dataset":"system.uptime","type":"metrics"},"period":"10s","metricsets":["uptime"]}]},{"id":"74abb3e2-a041-4684-8b3d-09e0e5eacd36","name":"Endgame","revision":28,"type":"endpoint","use_output":"default","meta":{"package":{"name":"endpoint","version":"0.18.0"}},"data_stream":{"namespace":"default"},"artifact_manifest":{"schema_version":"v1","manifest_version":"1.0.28","artifacts":{"endpoint-trustlist-windows-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-windows-v1/74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf","compression_algorithm":"zlib","decoded_size":338,"decoded_sha256":"74c2255ce31e0b48ada298ed6dacf6d1be7b0fb40c1bcb251d2da66f4b060acf","encryption_algorithm":"none","encoded_sha256":"8e70ce05d25709b6bbd4fd6981e86e24e1a2f85e3f69d2733058c568830f25d2","encoded_size":185},"endpoint-trustlist-macos-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-exceptionlist-macos-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-exceptionlist-macos-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-trustlist-linux-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-trustlist-linux-v1/d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22},"endpoint-exceptionlist-windows-v1":{"relative_url":"/api/endpoint/artifacts/download/endpoint-exceptionlist-windows-v1/d
801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","compression_algorithm":"zlib","decoded_size":14,"decoded_sha256":"d801aa1fb7ddcc330a5e3173372ea6af4a3d08ec58074478e85aa5603e926658","encryption_algorithm":"none","encoded_sha256":"f8e6afa1d5662f5b37f83337af774b5785b5b7f1daee08b7b00c2d6813874cda","encoded_size":22}}},"policy":{"linux":{"logging":{"file":"info"},"events":{"process":true,"file":true,"network":true}},"windows":{"popup":{"malware":{"enabled":true,"message":""},"ransomware":{"enabled":true,"message":""}},"malware":{"mode":"prevent"},"logging":{"file":"info"},"antivirus_registration":{"enabled":false},"events":{"registry":true,"process":true,"security":true,"file":true,"dns":true,"dll_and_driver_load":true,"network":true},"ransomware":{"mode":"prevent"}},"mac":{"popup":{"malware":{"enabled":true,"message":""}},"malware":{"mode":"prevent"},"logging":{"file":"info"},"events":{"process":true,"file":true,"network":true}}}}],"fleet":{"hosts":["http://10.128.0.4:8220"]}}` + +func TestNewParsedPolicy(t *testing.T) { + + // Run two formatting of the same payload to validate that the sha2 remains the same + payloads := []string{ + testPolicy, + minified, + } + + for _, payload := range payloads { + // Load the model into the policy object + var m model.Policy + if err := json.Unmarshal([]byte(payload), &m); err != nil { + t.Fatal(err) + } + + m.Data = json.RawMessage(testPolicy) + + pp, err := NewParsedPolicy(m) + if err != nil { + t.Fatal(err) + } + + fields := []string{ + "id", + "revision", + "outputs", + "output_permissions", + "agent", + "inputs", + "fleet", + } + + // Validate the fields; Expect the following top level items + if len(pp.Fields) != len(fields) { + t.Error("Expected N fields") + } + + for _, f := range fields { + if _, ok := pp.Fields[f]; !ok { + t.Error(fmt.Sprintf("Missing field %s", f)) + } + } + + // Now validate output perms hash + if len(pp.Roles) != 1 { + t.Error("Only expected one role") + } + + r, ok := pp.Roles["default"] + if !ok { + t.Fatal("Missing default role") + } + + expectedSha2 := "d4d0840fe28ca4900129a749b56cee729562c0a88c935192c659252b5b0d762a" + if r.Sha2 != expectedSha2 { + t.Fatal(fmt.Sprintf("Expected sha2: '%s', got '%s'.", expectedSha2, r.Sha2)) + } + } +} From 6609f4ee50fcc2e8e549b41877e9e94293bd1b71 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 12 Apr 2021 18:03:16 +0000 Subject: [PATCH 053/240] Add route metrics. (#222) (cherry picked from commit aac19c973ee5ffd2bfa85fed221669248098f199) Co-authored-by: Sean Cunningham --- NOTICE.txt | 32 +++++++++ cmd/fleet/handleAck.go | 27 ++++--- cmd/fleet/handleArtifacts.go | 33 ++------- cmd/fleet/handleCheckin.go | 45 ++++++------ cmd/fleet/handleEnroll.go | 30 ++++---- cmd/fleet/handleStatus.go | 10 ++- cmd/fleet/metrics.go | 125 +++++++++++++++++++++++++++++++++ cmd/fleet/server.go | 13 ---- go.mod | 1 + go.sum | 2 + internal/pkg/policy/monitor.go | 2 + 11 files changed, 229 insertions(+), 91 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index db2e72769..c5df54396 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1755,6 +1755,38 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/miolini/datacounter +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/miolini/datacounter@v1.0.2/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2015 Artem Andreenko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + + -------------------------------------------------------------------------------- Dependency : github.com/pkg/errors Version: v0.9.1 diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 5677713f4..ba399d952 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -23,7 +23,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/julienschmidt/httprouter" - "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -53,19 +52,8 @@ func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httproute err := rt.ack.handleAcks(w, r, id) if err != nil { - lvl := zerolog.DebugLevel - - var code int - switch err { - case limit.ErrRateLimit, limit.ErrMaxLimit: - code = http.StatusTooManyRequests - case context.Canceled: - code = http.StatusServiceUnavailable - default: - lvl = zerolog.InfoLevel - code = http.StatusBadRequest - } - // Don't log connection drops + code, lvl := cntAcks.IncError(err) + log.WithLevel(lvl). Err(err). Int("code", code). @@ -87,11 +75,17 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er return err } + // Metrics; serenity now. + dfunc := cntAcks.IncStart() + defer dfunc() + raw, err := ioutil.ReadAll(r.Body) if err != nil { return err } + cntAcks.bodyIn.Add(uint64(len(raw))) + var req AckRequest if err := json.Unmarshal(raw, &req); err != nil { return err @@ -110,10 +104,13 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er return err } - if _, err = w.Write(data); err != nil { + var nWritten int + if nWritten, err = w.Write(data); err != nil { return err } + cntAcks.bodyOut.Add(uint64(nWritten)) + return nil } diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 5508409b7..153862363 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -87,10 +87,12 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http Int64("nWritten", nWritten). Dur("rtt", time.Since(start)). 
Msg("Response sent") + + cntArtifacts.bodyOut.Add(uint64(nWritten)) } if err != nil { - code, lvl := rt.at.assessError(err) + code, lvl := cntArtifacts.IncError(err) zlog.WithLevel(lvl). Err(err). @@ -118,6 +120,10 @@ func (at ArtifactT) handleArtifacts(r *http.Request, zlog zerolog.Logger, id, sh return nil, err } + // Metrics; serenity now. + dfunc := cntArtifacts.IncStart() + defer dfunc() + zlog = zlog.With(). Str("APIKeyId", agent.AccessApiKeyId). Str("agentId", agent.Id). @@ -126,31 +132,6 @@ func (at ArtifactT) handleArtifacts(r *http.Request, zlog zerolog.Logger, id, sh return at.handle(r.Context(), zlog, agent, id, sha2) } -func (at ArtifactT) assessError(err error) (int, zerolog.Level) { - lvl := zerolog.DebugLevel - - // TODO: return a 503 on elastic timeout, connection drop - - var code int - switch err { - case dl.ErrNotFound: - // Artifact not found indicates a race condition upstream - // or an attack on the fleet server. Either way it should - // show up in the logs at a higher level than debug - lvl = zerolog.WarnLevel - code = http.StatusNotFound - case ErrorThrottle, limit.ErrRateLimit, limit.ErrMaxLimit: - code = http.StatusTooManyRequests - case context.Canceled: - code = http.StatusServiceUnavailable - default: - lvl = zerolog.InfoLevel - code = http.StatusBadRequest - } - - return code, lvl -} - type artHandler struct { zlog zerolog.Logger bulker bulk.Bulk diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index c8ba09b39..0f1024f75 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -26,6 +26,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/smap" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/miolini/datacounter" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog" @@ -47,25 +48,12 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro err := rt.ct._handleCheckin(w, r, id, rt.bulker) if err != nil { - lvl := zerolog.DebugLevel + code, lvl := cntCheckin.IncError(err) - var code int - switch err { - case ErrAgentNotFound: - code = http.StatusNotFound + // Log this as warn for visibility that limit has been reached. + // This allows customers to tune the configuration on detection of threshold. + if err == limit.ErrMaxLimit { lvl = zerolog.WarnLevel - case limit.ErrRateLimit: - code = http.StatusTooManyRequests - case limit.ErrMaxLimit: - // Log this as warn for visibility that limit has been reached. - // This allows customers to tune the configuration on detection of threshold. - code = http.StatusTooManyRequests - lvl = zerolog.WarnLevel - case context.Canceled: - code = http.StatusServiceUnavailable - default: - lvl = zerolog.InfoLevel - code = http.StatusBadRequest } log.WithLevel(lvl). @@ -136,15 +124,23 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } + // Metrics; serenity now. 
+ dfunc := cntCheckin.IncStart() + defer dfunc() + ctx := r.Context() // Interpret request; TODO: defend overflow, slow roll + readCounter := datacounter.NewReaderCounter(r.Body) + var req CheckinRequest - decoder := json.NewDecoder(r.Body) + decoder := json.NewDecoder(readCounter) if err := decoder.Decode(&req); err != nil { return err } + cntCheckin.bodyIn.Add(readCounter.Count()) + // Compare local_metadata content and update if different fields, err := parseMeta(agent, &req) if err != nil { @@ -241,7 +237,9 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C if len(payload) > compressThreshold && compressionLevel != flate.NoCompression && acceptsEncoding(r, kEncodingGzip) { - zipper, err := gzip.NewWriterLevel(w, compressionLevel) + wrCounter := datacounter.NewWriterCounter(w) + + zipper, err := gzip.NewWriterLevel(wrCounter, compressionLevel) if err != nil { return err } @@ -254,13 +252,18 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C err = zipper.Close() + cntCheckin.bodyOut.Add(wrCounter.Count()) + log.Trace(). Err(err). - Int("dataSz", len(payload)). Int("lvl", compressionLevel). + Int("srcSz", len(payload)). + Uint64("dstSz", wrCounter.Count()). Msg("compressing checkin response") } else { - _, err = w.Write(payload) + var nWritten int + nWritten, err = w.Write(payload) + cntCheckin.bodyOut.Add(uint64(nWritten)) } return err diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 1ae3085dc..e15ceb26d 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -25,7 +25,7 @@ import ( "github.com/elastic/go-elasticsearch/v8" "github.com/gofrs/uuid" "github.com/julienschmidt/httprouter" - "github.com/rs/zerolog" + "github.com/miolini/datacounter" "github.com/rs/zerolog/log" ) @@ -72,18 +72,7 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou data, err := rt.et.handleEnroll(r) if err != nil { - lvl := zerolog.DebugLevel - - var code int - switch err { - case limit.ErrRateLimit, limit.ErrMaxLimit: - code = http.StatusTooManyRequests - case context.Canceled: - code = http.StatusServiceUnavailable - default: - lvl = zerolog.InfoLevel - code = http.StatusBadRequest - } + code, lvl := cntEnroll.IncError(err) log.WithLevel(lvl). Err(err). @@ -96,10 +85,13 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou return } - if _, err = w.Write(data); err != nil { + var numWritten int + if numWritten, err = w.Write(data); err != nil { log.Error().Err(err).Msg("Fail send enroll response") } + cntEnroll.bodyOut.Add(uint64(numWritten)) + log.Trace(). Err(err). RawJSON("raw", data). @@ -121,18 +113,26 @@ func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { return nil, err } + // Metrics; serenity now. + dfunc := cntEnroll.IncStart() + defer dfunc() + // Validate that an enrollment record exists for a key with this id. 
erec, err := et.fetchEnrollmentKeyRecord(r.Context(), key.Id) if err != nil { return nil, err } + readCounter := datacounter.NewReaderCounter(r.Body) + // Parse the request body - req, err := decodeEnrollRequest(r.Body) + req, err := decodeEnrollRequest(readCounter) if err != nil { return nil, err } + cntEnroll.bodyIn.Add(readCounter.Count()) + resp, err := _enroll(r.Context(), et.bulker, et.cache, *req, *erec) if err != nil { return nil, err diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go index 578cc1a28..51dac23cd 100644 --- a/cmd/fleet/handleStatus.go +++ b/cmd/fleet/handleStatus.go @@ -15,6 +15,10 @@ import ( ) func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { + // Metrics; serenity now. + dfunc := cntStatus.IncStart() + defer dfunc() + status := rt.sm.Status() resp := StatusResponse{ Name: "fleet-server", @@ -34,9 +38,13 @@ func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprout code = http.StatusOK } w.WriteHeader(code) - if _, err = w.Write(data); err != nil { + + var nWritten int + if nWritten, err = w.Write(data); err != nil { if err != context.Canceled { log.Error().Err(err).Int("code", code).Msg("fail status") } } + + cntStatus.bodyOut.Add(uint64(nWritten)) } diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index 70cf439fd..460f5e4b3 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -7,14 +7,31 @@ package fleet import ( "context" "github.com/pkg/errors" + "net/http" "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/beats/v7/libbeat/api" "github.com/elastic/beats/v7/libbeat/cmd/instance/metrics" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/rs/zerolog" +) + +var ( + registry *monitoring.Registry + + cntHttpNew *monitoring.Uint + cntHttpClose *monitoring.Uint + + cntCheckin routeStats + cntEnroll routeStats + cntAcks routeStats + cntStatus routeStats + cntArtifacts artifactStats ) func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { @@ -42,3 +59,111 @@ func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api return s, err } + +type routeStats struct { + active *monitoring.Uint + total *monitoring.Uint + rateLimit *monitoring.Uint + maxLimit *monitoring.Uint + failure *monitoring.Uint + drop *monitoring.Uint + bodyIn *monitoring.Uint + bodyOut *monitoring.Uint +} + +func (rt *routeStats) Register(registry *monitoring.Registry) { + rt.active = monitoring.NewUint(registry, "active") + rt.total = monitoring.NewUint(registry, "total") + rt.rateLimit = monitoring.NewUint(registry, "limit_rate") + rt.maxLimit = monitoring.NewUint(registry, "limit_max") + rt.failure = monitoring.NewUint(registry, "fail") + rt.drop = monitoring.NewUint(registry, "drop") + rt.bodyIn = monitoring.NewUint(registry, "body_in") + rt.bodyOut = monitoring.NewUint(registry, "body_out") +} + +func init() { + registry = monitoring.Default.NewRegistry("http_server") + cntHttpNew = monitoring.NewUint(registry, "tcp_open") + cntHttpClose = monitoring.NewUint(registry, "tcp_close") + + routesRegistry := registry.NewRegistry("routes") + + cntCheckin.Register(routesRegistry.NewRegistry("checkin")) + cntEnroll.Register(routesRegistry.NewRegistry("enroll")) + 
cntArtifacts.Register(routesRegistry.NewRegistry("artifacts")) + cntAcks.Register(routesRegistry.NewRegistry("acks")) + cntStatus.Register(routesRegistry.NewRegistry("status")) +} + +// Increment error metric, log and return code +func (rt *routeStats) IncError(err error) (int, zerolog.Level) { + lvl := zerolog.DebugLevel + + incFail := true + + var code int + switch err { + case ErrAgentNotFound: + code = http.StatusNotFound + lvl = zerolog.WarnLevel + case limit.ErrRateLimit: + code = http.StatusTooManyRequests + rt.rateLimit.Inc() + incFail = false + case limit.ErrMaxLimit: + code = http.StatusTooManyRequests + rt.maxLimit.Inc() + incFail = false + case context.Canceled: + code = http.StatusServiceUnavailable + rt.drop.Inc() + incFail = false + default: + lvl = zerolog.InfoLevel + code = http.StatusBadRequest + } + + if incFail { + cntCheckin.failure.Inc() + } + + return code, lvl +} + +func (rt *routeStats) IncStart() func() { + rt.total.Inc() + rt.active.Inc() + return rt.active.Dec +} + +type artifactStats struct { + routeStats + notFound *monitoring.Uint + throttle *monitoring.Uint +} + +func (rt *artifactStats) Register(registry *monitoring.Registry) { + rt.routeStats.Register(registry) + rt.notFound = monitoring.NewUint(registry, "not_found") + rt.throttle = monitoring.NewUint(registry, "throttle") +} + +func (rt *artifactStats) IncError(err error) (code int, lvl zerolog.Level) { + switch err { + case dl.ErrNotFound: + // Artifact not found indicates a race condition upstream + // or an attack on the fleet server. Either way it should + // show up in the logs at a higher level than debug + code = http.StatusNotFound + rt.notFound.Inc() + lvl = zerolog.WarnLevel + case ErrorThrottle: + code = http.StatusTooManyRequests + rt.throttle.Inc() + default: + code, lvl = rt.routeStats.IncError(err) + } + + return +} diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 7d898352b..a0e57503c 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -14,24 +14,11 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" - "github.com/elastic/beats/v7/libbeat/monitoring" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" "golang.org/x/net/netutil" ) -var ( - registry *monitoring.Registry - cntHttpNew *monitoring.Uint - cntHttpClose *monitoring.Uint -) - -func init() { - registry = monitoring.Default.NewRegistry("http_server") - cntHttpNew = monitoring.NewUint(registry, "tcp_open") - cntHttpClose = monitoring.NewUint(registry, "tcp_close") -} - func diagConn(c net.Conn, s http.ConnState) { if c == nil { return diff --git a/go.mod b/go.mod index f419e1008..6d8037987 100644 --- a/go.mod +++ b/go.mod @@ -14,6 +14,7 @@ require ( github.com/hashicorp/go-cleanhttp v0.5.1 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 + github.com/miolini/datacounter v1.0.2 github.com/pkg/errors v0.9.1 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.19.0 diff --git a/go.sum b/go.sum index 6129ee24e..24f2f3150 100644 --- a/go.sum +++ b/go.sum @@ -564,6 +564,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/miekg/dns v1.1.15 h1:CSSIDtllwGLMoA6zjdKnaE6Tx6eVUxQ29LUgGetiDCI= github.com/miekg/dns v1.1.15/go.mod 
h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= +github.com/miolini/datacounter v1.0.2 h1:mGTL0vqEAtH7mwNJS1JIpd6jwTAP6cBQQ2P8apaCIm8= +github.com/miolini/datacounter v1.0.2/go.mod h1:C45dc2hBumHjDpEU64IqPwR6TDyPVpzOqqRTN7zmBUA= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/gox v1.0.1 h1:x0jD3dcHk9a9xPSDN6YEL4xL6Qz0dvNYm8yZqui5chI= diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index b4eeb170f..e285f2ce4 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -94,6 +94,8 @@ func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duratio // Run runs the monitor. func (m *monitorT) Run(ctx context.Context) error { + m.log.Info().Dur("throttle", m.throttle).Msg("run policy monitor") + s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) From 153b5e5cdafef11279a217de0a188af197b06f31 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 12 Apr 2021 20:27:18 +0000 Subject: [PATCH 054/240] Fix issue with not sending the agents default api_key. (#224) (#225) (cherry picked from commit c45b1296483f4bc951aea13e3d4dc1e8a72247b9) Co-authored-by: Blake Rouse --- cmd/fleet/handleCheckin.go | 1 + 1 file changed, 1 insertion(+) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 0f1024f75..8818b4634 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -409,6 +409,7 @@ func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId string, pp *po zlog.Error().Err(err).Msg("fail update agent record") return nil, err } + agent.DefaultApiKey = defaultOutputApiKey.Agent() } rewrittenPolicy, err := rewritePolicy(pp, agent.DefaultApiKey) From 51a3b0a7dac7fc74ffbca29f23ff0aa614dc4ca2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 13 Apr 2021 18:29:36 +0000 Subject: [PATCH 055/240] Wire additional bulk configuration options. (backport #221) (#228) * Wire additional bulk configuration options. Turn off noisy trace debugging in throttle unit test. (cherry picked from commit 3ab0452e0a77e49df2e1038adaeb59456edfc7df) * Always flush policy create. This fixes intermittent errors in integration test, as well as logic in the coordinator that assumes flush. 
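For illustration only, a configuration sketch of the newly wired keys under the elasticsearch output (the values shown mirror the defaults set in this change; tune them per deployment):

```yaml
output:
  elasticsearch:
    bulk_flush_interval: 250ms          # flush queued bulk operations on this interval
    bulk_flush_threshold_cnt: 2048      # flush early once this many operations are queued
    bulk_flush_threshold_size: 1048576  # flush early once the queued payload reaches ~1 MiB
    bulk_flush_max_pending: 8           # cap the number of in-flight bulk requests
```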
(cherry picked from commit 878c2c39933cf89b46a92b4e38c16aa6be2a7d20) Co-authored-by: Sean Cunningham --- internal/pkg/bulk/bulk.go | 7 ++- internal/pkg/config/config_test.go | 76 +++++++++++++++----------- internal/pkg/config/output.go | 34 +++++++----- internal/pkg/dl/policies.go | 2 +- internal/pkg/throttle/throttle_test.go | 19 +++++++ 5 files changed, 90 insertions(+), 48 deletions(-) diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go index 68cf6fb95..2b98ef49f 100644 --- a/internal/pkg/bulk/bulk.go +++ b/internal/pkg/bulk/bulk.go @@ -92,7 +92,12 @@ func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elastics return nil, nil, err } - opts = append(opts, WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval)) + opts = append(opts, + WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval), + WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount), + WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize), + WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending), + ) blk := NewBulker(es) go func() { diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index d91699155..f50a67c31 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -32,14 +32,17 @@ func TestConfig(t *testing.T) { }, Output: Output{ Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + BulkFlushInterval: 250 * time.Millisecond, + BulkFlushThresholdCount: 2048, + BulkFlushThresholdSize: 1048576, + BulkFlushMaxPending: 8, + Timeout: 90 * time.Second, }, }, Inputs: []Input{ @@ -118,14 +121,17 @@ func TestConfig(t *testing.T) { }, Output: Output{ Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + BulkFlushInterval: 250 * time.Millisecond, + BulkFlushThresholdCount: 2048, + BulkFlushThresholdSize: 1048576, + BulkFlushMaxPending: 8, + Timeout: 90 * time.Second, }, }, Inputs: []Input{ @@ -202,14 +208,17 @@ func TestConfig(t *testing.T) { }, Output: Output{ Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + BulkFlushInterval: 250 * time.Millisecond, + BulkFlushThresholdCount: 2048, + BulkFlushThresholdSize: 1048576, + BulkFlushMaxPending: 8, + Timeout: 90 * time.Second, }, }, Inputs: []Input{ @@ -286,14 +295,17 @@ func TestConfig(t *testing.T) { }, Output: Output{ Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * 
time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + BulkFlushInterval: 250 * time.Millisecond, + BulkFlushThresholdCount: 2048, + BulkFlushThresholdSize: 1048576, + BulkFlushMaxPending: 8, + Timeout: 90 * time.Second, }, }, Inputs: []Input{ diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index 759a912ab..2c33454ad 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -24,20 +24,23 @@ var hasScheme = regexp.MustCompile(`^([a-z][a-z0-9+\-.]*)://`) // Elasticsearch is the configuration for elasticsearch. type Elasticsearch struct { - Protocol string `config:"protocol"` - Hosts []string `config:"hosts"` - Path string `config:"path"` - Headers map[string]string `config:"headers"` - Username string `config:"username"` - Password string `config:"password"` - APIKey string `config:"api_key"` - ProxyURL string `config:"proxy_url"` - ProxyDisable bool `config:"proxy_disable"` - TLS *tlscommon.Config `config:"ssl"` - MaxRetries int `config:"max_retries"` - MaxConnPerHost int `config:"max_conn_per_host"` - BulkFlushInterval time.Duration `config:"bulk_flush_interval"` - Timeout time.Duration `config:"timeout"` + Protocol string `config:"protocol"` + Hosts []string `config:"hosts"` + Path string `config:"path"` + Headers map[string]string `config:"headers"` + Username string `config:"username"` + Password string `config:"password"` + APIKey string `config:"api_key"` + ProxyURL string `config:"proxy_url"` + ProxyDisable bool `config:"proxy_disable"` + TLS *tlscommon.Config `config:"ssl"` + MaxRetries int `config:"max_retries"` + MaxConnPerHost int `config:"max_conn_per_host"` + BulkFlushInterval time.Duration `config:"bulk_flush_interval"` + BulkFlushThresholdCount int `config:"bulk_flush_threshold_cnt"` + BulkFlushThresholdSize int `config:"bulk_flush_threshold_size"` + BulkFlushMaxPending int `config:"bulk_flush_max_pending"` + Timeout time.Duration `config:"timeout"` } // InitDefaults initializes the defaults for the configuration. @@ -48,6 +51,9 @@ func (c *Elasticsearch) InitDefaults() { c.MaxRetries = 3 c.MaxConnPerHost = 128 c.BulkFlushInterval = 250 * time.Millisecond + c.BulkFlushThresholdCount = 2048 + c.BulkFlushThresholdSize = 1024 * 1024 + c.BulkFlushMaxPending = 8 } // Validate ensures that the configuration is valid. 
diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index b24e5b1b5..f12199619 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -70,5 +70,5 @@ func CreatePolicy(ctx context.Context, bulker bulk.Bulk, policy model.Policy, op if err != nil { return "", err } - return bulker.Create(ctx, o.indexName, "", data) + return bulker.Create(ctx, o.indexName, "", data, bulk.WithRefresh()) } diff --git a/internal/pkg/throttle/throttle_test.go b/internal/pkg/throttle/throttle_test.go index aafbc7f23..91b2d9412 100644 --- a/internal/pkg/throttle/throttle_test.go +++ b/internal/pkg/throttle/throttle_test.go @@ -5,13 +5,24 @@ package throttle import ( + "github.com/rs/zerolog" "math/rand" "strconv" "testing" "time" ) +func disableTraceLogging() func() { + lvl := zerolog.GlobalLevel() + zerolog.SetGlobalLevel(zerolog.InfoLevel) + return func() { + zerolog.SetGlobalLevel(lvl) + } +} + func TestThrottleZero(t *testing.T) { + f := disableTraceLogging() + defer f() // Zero max parallel means we can acquire as many as we want, // but still cannot acquire existing that has not timed out @@ -80,6 +91,8 @@ func TestThrottleZero(t *testing.T) { } func TestThrottleN(t *testing.T) { + f := disableTraceLogging() + defer f() for N := 1; N < 11; N++ { @@ -148,6 +161,9 @@ func TestThrottleN(t *testing.T) { } func TestThrottleExpireIdentity(t *testing.T) { + f := disableTraceLogging() + defer f() + throttle := NewThrottle(1) key := "xxx" @@ -182,6 +198,9 @@ func TestThrottleExpireIdentity(t *testing.T) { // Test that a token from a different key is expired when at max func TestThrottleExpireAtMax(t *testing.T) { + f := disableTraceLogging() + defer f() + throttle := NewThrottle(1) key1 := "xxx" From a4be496d6cd13c78cfe2e92ba4272c8a3fb95caa Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 13 Apr 2021 19:41:19 +0000 Subject: [PATCH 056/240] Fix coordinator goroutine tight loop on cancelled context (#229) (#230) (cherry picked from commit b65314f7fd31646c8e0d092b060d68c2490ca00f) Co-authored-by: Aleksandr Maus --- internal/pkg/coordinator/monitor.go | 2 ++ 1 file changed, 2 insertions(+) diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 67b85e636..d36e3fd78 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -372,6 +372,8 @@ func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d t if sleep.WithContext(ctx, d) == context.Canceled { break } + } else { + break } } } From c5e7d9a8d927f8fe1086111a6be7af77d7b5a644 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 13 Apr 2021 20:43:10 +0000 Subject: [PATCH 057/240] Update graceful handling for missing indices to accommodate the new fleet system indices plugin (#205) (#231) * Update the no index found handling to accommodate the new fleet system indices plugin * Add logging (cherry picked from commit 85d0ebf97ac8be5bfdc59f910ba47e3669141d85) Co-authored-by: Aleksandr Maus --- internal/pkg/dl/actions.go | 7 +++++++ internal/pkg/dl/policies_leader.go | 11 +++++++++-- 2 files changed, 16 insertions(+), 2 deletions(-) diff --git a/internal/pkg/dl/actions.go b/internal/pkg/dl/actions.go index 9a2b34fe7..2c34dec16 100644 --- a/internal/pkg/dl/actions.go +++ b/internal/pkg/dl/actions.go @@ -6,10 +6,13 @@ package dl import ( "context" + "errors" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" 
"github.com/elastic/fleet-server/v7/internal/pkg/dsl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/rs/zerolog/log" ) const ( @@ -83,6 +86,10 @@ func FindActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, params m func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) ([]model.Action, error) { res, err := Search(ctx, bulker, tmpl, index, params) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + log.Debug().Str("index", index).Msg(es.ErrIndexNotFound.Error()) + err = nil + } return nil, err } diff --git a/internal/pkg/dl/policies_leader.go b/internal/pkg/dl/policies_leader.go index 57e3d650e..53bb6f8d6 100644 --- a/internal/pkg/dl/policies_leader.go +++ b/internal/pkg/dl/policies_leader.go @@ -7,12 +7,15 @@ package dl import ( "context" "encoding/json" + "errors" + "sync" + "time" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "sync" - "time" + "github.com/rs/zerolog/log" ) var ( @@ -48,6 +51,10 @@ func SearchPolicyLeaders(ctx context.Context, bulker bulk.Bulk, ids []string, op } res, err := bulker.Search(ctx, []string{o.indexName}, data) if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + log.Debug().Str("index", o.indexName).Msg(es.ErrIndexNotFound.Error()) + err = nil + } return } From 136ce8146b1122ca207da3024c368a85c5c46f37 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 13 Apr 2021 21:07:33 +0000 Subject: [PATCH 058/240] Implement support for API Key metadata (#195) (#232) * Implement support for API Key metadata * Adjust apikey.Create to make the metadata functional options * Address code review feedback * Make metadata properties json omitempty * Additional changes to the metatadata format (cherry picked from commit 82ea1e798ac682929ebcc65b390d6d556a803c86) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleEnroll.go | 6 +- internal/pkg/apikey/apikey.go | 1 + .../pkg/apikey/apikey_integration_test.go | 79 +++++++++++++++++++ internal/pkg/apikey/apikey_test.go | 5 +- internal/pkg/apikey/create.go | 11 +-- internal/pkg/apikey/get.go | 66 ++++++++++++++++ internal/pkg/apikey/metadata.go | 34 ++++++++ 7 files changed, 194 insertions(+), 8 deletions(-) create mode 100644 internal/pkg/apikey/apikey_integration_test.go create mode 100644 internal/pkg/apikey/get.go create mode 100644 internal/pkg/apikey/metadata.go diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index e15ceb26d..55ff4c2d1 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -280,12 +280,14 @@ func createFleetAgent(ctx context.Context, bulker bulk.Bulk, id string, agent mo } func generateAccessApiKey(ctx context.Context, client *elasticsearch.Client, agentId string) (*apikey.ApiKey, error) { - return apikey.Create(ctx, client, agentId, "", []byte(kFleetAccessRolesJSON)) + return apikey.Create(ctx, client, agentId, "", []byte(kFleetAccessRolesJSON), + apikey.NewMetadata(agentId, apikey.TypeAccess)) } func generateOutputApiKey(ctx context.Context, client *elasticsearch.Client, agentId, outputName string, roles []byte) (*apikey.ApiKey, error) { name := fmt.Sprintf("%s:%s", agentId, outputName) - return apikey.Create(ctx, client, name, "", roles) + return 
apikey.Create(ctx, client, name, "", roles, + apikey.NewMetadata(agentId, apikey.TypeOutput)) } func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (*model.EnrollmentApiKey, error) { diff --git a/internal/pkg/apikey/apikey.go b/internal/pkg/apikey/apikey.go index afc02564b..9230cabea 100644 --- a/internal/pkg/apikey/apikey.go +++ b/internal/pkg/apikey/apikey.go @@ -22,6 +22,7 @@ var ( ErrMalformedHeader = errors.New("malformed authorization header") ErrMalformedToken = errors.New("malformed token") ErrInvalidToken = errors.New("token not valid utf8") + ErrApiKeyNotFound = errors.New("api key not found") ) var AuthKey = http.CanonicalHeaderKey("Authorization") diff --git a/internal/pkg/apikey/apikey_integration_test.go b/internal/pkg/apikey/apikey_integration_test.go new file mode 100644 index 000000000..f629f535c --- /dev/null +++ b/internal/pkg/apikey/apikey_integration_test.go @@ -0,0 +1,79 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build integration + +package apikey + +import ( + "context" + "errors" + "testing" + + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" + + "github.com/gofrs/uuid" + "github.com/google/go-cmp/cmp" +) + +const testFleetRoles = ` +{ + "fleet-apikey-access": { + "cluster": [], + "applications": [{ + "application": ".fleet", + "privileges": ["no-privileges"], + "resources": ["*"] + }] + } +} +` + +func TestCreateApiKeyWithMetadata(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + bulker := ftesting.SetupBulk(ctx, t) + + // Create the key + agentId := uuid.Must(uuid.NewV4()).String() + name := uuid.Must(uuid.NewV4()).String() + akey, err := Create(ctx, bulker.Client(), name, "", []byte(testFleetRoles), + NewMetadata(agentId, TypeAccess)) + if err != nil { + t.Fatal(err) + } + + // Get the key and verify that metadata was saved correctly + aKeyMeta, err := Get(ctx, bulker.Client(), akey.Id) + if err != nil { + t.Fatal(err) + } + + diff := cmp.Diff(ManagedByFleetServer, aKeyMeta.Metadata.ManagedBy) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(true, aKeyMeta.Metadata.Managed) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(agentId, aKeyMeta.Metadata.AgentId) + if diff != "" { + t.Error(diff) + } + + diff = cmp.Diff(TypeAccess.String(), aKeyMeta.Metadata.Type) + if diff != "" { + t.Error(diff) + } + + // Try to get the key that doesn't exists, expect ErrApiKeyNotFound + aKeyMeta, err = Get(ctx, bulker.Client(), "0000000000000") + if !errors.Is(err, ErrApiKeyNotFound) { + t.Errorf("Unexpected error type: %v", err) + } +} diff --git a/internal/pkg/apikey/apikey_test.go b/internal/pkg/apikey/apikey_test.go index efd6d0fb6..12ca70613 100644 --- a/internal/pkg/apikey/apikey_test.go +++ b/internal/pkg/apikey/apikey_test.go @@ -2,12 +2,15 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+// +build !integration + package apikey import ( "encoding/base64" - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/testify/assert" ) func TestMonitorLeadership(t *testing.T) { diff --git a/internal/pkg/apikey/create.go b/internal/pkg/apikey/create.go index 35d4b66b2..52f9c512f 100644 --- a/internal/pkg/apikey/create.go +++ b/internal/pkg/apikey/create.go @@ -14,16 +14,17 @@ import ( "github.com/elastic/go-elasticsearch/v8/esapi" ) -func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, roles []byte) (*ApiKey, error) { - +func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) { payload := struct { Name string `json:"name,omitempty"` Expiration string `json:"expiration,omitempty"` Roles json.RawMessage `json:"role_descriptors,omitempty"` + Metadata interface{} `json:"metadata"` }{ - name, - ttl, - roles, + Name: name, + Expiration: ttl, + Roles: roles, + Metadata: meta, } body, err := json.Marshal(&payload) diff --git a/internal/pkg/apikey/get.go b/internal/pkg/apikey/get.go new file mode 100644 index 000000000..7230dcd95 --- /dev/null +++ b/internal/pkg/apikey/get.go @@ -0,0 +1,66 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package apikey + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +type ApiKeyMetadata struct { + Id string + Metadata Metadata +} + +func Get(ctx context.Context, client *elasticsearch.Client, id string) (apiKey ApiKeyMetadata, err error) { + + opts := []func(*esapi.SecurityGetAPIKeyRequest){ + client.Security.GetAPIKey.WithContext(ctx), + client.Security.GetAPIKey.WithID(id), + } + + res, err := client.Security.GetAPIKey( + opts..., + ) + + if err != nil { + return + } + + defer res.Body.Close() + + if res.IsError() { + return apiKey, fmt.Errorf("fail GetAPIKey: %s, %w", res.String(), ErrApiKeyNotFound) + } + + type APIKeyResponse struct { + Id string `json:"id"` + Metadata Metadata `json:"metadata"` + } + type GetAPIKeyResponse struct { + ApiKeys []APIKeyResponse `json:"api_keys"` + } + + var resp GetAPIKeyResponse + d := json.NewDecoder(res.Body) + if err = d.Decode(&resp); err != nil { + return + } + + if len(resp.ApiKeys) == 0 { + return apiKey, ErrApiKeyNotFound + } + + first := resp.ApiKeys[0] + + return ApiKeyMetadata{ + Id: first.Id, + Metadata: first.Metadata, + }, nil +} diff --git a/internal/pkg/apikey/metadata.go b/internal/pkg/apikey/metadata.go new file mode 100644 index 000000000..5e347ecb8 --- /dev/null +++ b/internal/pkg/apikey/metadata.go @@ -0,0 +1,34 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package apikey
+
+const ManagedByFleetServer = "fleet-server"
+
+type Type int
+
+const (
+	TypeAccess Type = iota
+	TypeOutput
+)
+
+func (t Type) String() string {
+	return []string{"access", "output"}[t]
+}
+
+type Metadata struct {
+	AgentId   string `json:"agent_id,omitempty"`
+	Managed   bool   `json:"managed,omitempty"`
+	ManagedBy string `json:"managed_by,omitempty"`
+	Type      string `json:"type,omitempty"`
+}
+
+func NewMetadata(agentId string, typ Type) Metadata {
+	return Metadata{
+		AgentId:   agentId,
+		Managed:   true,
+		ManagedBy: ManagedByFleetServer,
+		Type:      typ.String(),
+	}
+}

From 7ef28349d5ab0a53afe55288ee874514e6f64585 Mon Sep 17 00:00:00 2001
From: Sean Cunningham
Date: Wed, 14 Apr 2021 08:08:23 -0400
Subject: [PATCH 059/240] Bring 7.x readme up to date with master. (#233)

---
 README.md | 92 +++++++++++++++++++++++++++++++++----------------------
 1 file changed, 56 insertions(+), 36 deletions(-)

diff --git a/README.md b/README.md
index eaf469b89..0c8325e59 100644
--- a/README.md
+++ b/README.md
@@ -6,57 +6,77 @@
 fleet-server is under development. The following are notes to help developers onboarding to the project to quickly get running. These notes might change at any time.
-### Startup fleet-server
+## Setup
-Currently to startup fleet-server, the Kibana encryption key is needed. There are two options for this.
+To run and test fleet-server, a recent version of Elastic Agent and Kibana are needed. In the following, Elastic Agent and Kibana are built from source. The fleet-server itself is not built from source but pulled from the latest snapshot build. It would be possible to also pull Elastic Agent or Kibana from the latest snapshot, but the assumption made here is that whoever is testing this is likely developing either Elastic Agent or Kibana.
-Either the key `a...` is used in the kibana config as this is the default:
+
+### Kibana setup
+
+The source code of Kibana must be checked out. After checkout, the following command must be run:
 ```
-xpack.encryptedSavedObjects.encryptionKey: "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
+yarn kbn bootstrap
 ```
-The alternative is to use `ES_SAVED_KEY` and pass it to fleet-server during setup with the value of the encryption key used in Kibana.
+This will take a while the first time it is run. An error might be returned if an invalid Node version is installed. Use nvm to install the correct version.
+
+Now the following two commands must be run in parallel:
+
+```
+# Start ES
+yarn es snapshot -E xpack.security.authc.api_key.enabled=true
+
+# Start KB
+yarn start --no-base-path
+```
+As soon as everything is running, go to `http://localhost:5601`, enter `elastic/changeme` as the credentials, and navigate to Fleet. Trigger the Fleet setup. As soon as this is completed, copy the `policy id` and `enrollment token` for the fleet-server policy. The policy id can be copied from the URL; the enrollment token can be found in the Enrollment Token list.
-### Kibana
+NOTE: This step can be skipped if the full command below for the Elastic Agent is used.
-Currently there is some work to do to be able to run Kibana with Fleet Server and all the features are not yet supported, in the future, these workarounds will not be needed anymore.
+Now Kibana is running and ready. The next step is to set up Elastic Agent.
-* Start fleet-server before Kibana, to create the mappings in ES.
-* Create and use a custom user as the `kibana_system` user -* Enable Fleet server usage with `xpack.fleet.agents.fleetServerEnabled: true` +## Beats repo +To build the Elastic Agent from source, check out the beats repository. Navigate to `x-pack/elastic-agent` and run the following command: + +``` +SNAPSHOT=true DEV=true PLATFORMS=darwin mage package +``` + +The above assumes you are running on OS X. Put the platform in you are running on. This speeds up packaging as it only builds it for your platform. As soon as this is completed (it might take a while for the first time) navigate to `build/distributions` and unpackage the `.tar.gz`. Change working directory to the elastic-agent directory and start the Elastic Agent: + +``` +KIBANA_HOST=http://localhost:5601 KIBANA_USERNAME=elastic KIBANA_PASSWORD=changeme ELASTICSEARCH_HOST=http://localhost:9200 ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme KIBANA_FLEET_SETUP=1 FLEET_SERVER_ENABLE=1 sudo ./elastic-agent container ``` -POST /_security/role/kibana_fleet_system -{ - "cluster" : [ - "all" - ], - "indices" : [ - { - "names" : [ - ".fleet*" - ], - "privileges" : [ - "all" - ] - } - ] -} +This will start up Elastic Agent with fleet-server and directly enroll it. In addition Fleet is setup inside of Kibana. +## Running Elastic Agent with fleet-server in container -POST /_security/user/kibana_fleet_system -{ - "password" : "changeme", - "roles" : [ "kibana_system", "kibana_fleet_system" ] -} +If you want to run Elastic Agent and fleet-server in a container but built Kibana from source, you have to add the following to your `config/kibana.dev.yml`: + +``` +server.host: 0.0.0.0 +``` + +This makes sure, Kibana is accessible from the container. Start Kibana as before but for Elasticsearch, run the following command: + +``` +yarn es snapshot -E xpack.security.authc.api_key.enabled=true -E http.host=0.0.0.0 ``` -Then configure your Kibana with +This makes sure also Elasticsearch is accessible to the container. + +Start the Elastic Agent with the following command: + +``` +docker run -e KIBANA_HOST=http://{YOUR-IP}:5601 -e KIBANA_USERNAME=elastic -e KIBANA_PASSWORD=changeme -e ELASTICSEARCH_HOST=http://{YOUR-IP}:9200 -e ELASTICSEARCH_USERNAME=elastic -e ELASTICSEARCH_PASSWORD=changeme -e KIBANA_FLEET_SETUP=1 -e FLEET_SERVER_ENABLE=1 -e FLEET_SERVER_INSECURE_HTTP=1 docker.elastic.co/beats/elastic-agent:8.0.0-SNAPSHOT ``` -elasticsearch.username: 'kibana_fleet_system' -elasticsearch.password: 'changeme' -xpack.fleet.agents.fleetServerEnabled: true -``` \ No newline at end of file + +Replace {YOUR-IP} with the IP address of your machine. + +## fleet-server repo + +By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. \ No newline at end of file From 0aeb5a96c0edfee4d7e95c2f0234c8d42c422616 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 14 Apr 2021 14:49:38 +0000 Subject: [PATCH 060/240] Add ability to communicate with elasticsearch using a service token (#226) (#237) * Support for service token. * Add comment to fleet-server.yml. * Use fork to set service token. * Update to v8 elasticsearch client. * Run make check. 
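For illustration only, a minimal output sketch using the new setting (the token value is a placeholder); per the comment added to fleet-server.yml, username/password should be commented out when a service token is used:

```yaml
output:
  elasticsearch:
    hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}'
    service_token: '<your-service-token>'  # placeholder value
```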
(cherry picked from commit 38bc66a4f9780f0669fd850bc49989cc2ce9fe92) Co-authored-by: Blake Rouse --- NOTICE.txt | 4 ++-- fleet-server.yml | 1 + go.mod | 2 +- go.sum | 4 ++-- internal/pkg/config/output.go | 14 ++++++++------ 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index c5df54396..3d6c298fc 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -484,11 +484,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.0.0-20200728144331-527225d8e836 +Version: v8.0.0-20210414074309-f7ffd04b8d6a Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.0.0-20200728144331-527225d8e836/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.0.0-20210414074309-f7ffd04b8d6a/LICENSE: Apache License Version 2.0, January 2004 diff --git a/fleet-server.yml b/fleet-server.yml index 30dcf435f..4e2ed9618 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -3,6 +3,7 @@ output: hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' username: '${ELASTICSEARCH_USERNAME:elastic}' password: '${ELASTICSEARCH_PASSWORD:changeme}' + #service_token: 'token' # comment out username/password when this is set fleet: agent: diff --git a/go.mod b/go.mod index 6d8037987..745deb277 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a - github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 + github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible github.com/google/go-cmp v0.4.0 diff --git a/go.sum b/go.sum index 24f2f3150..d6533a4b9 100644 --- a/go.sum +++ b/go.sum @@ -252,8 +252,8 @@ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836 h1:0ZrGQPGY7QCySD/14ht2UDggGKmqgLouMd5FFimcguA= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20200728144331-527225d8e836/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a h1:9sZywotr64cDBOcWWCFpjOjf4oFuFhKnopckNQ4EqcU= +github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= github.com/elastic/go-libaudit/v2 v2.1.0 h1:yWSKoGaoWLGFPjqWrQ4gwtuM77pTk7K4CsPxXss8he4= github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index 2c33454ad..b800f4100 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -31,6 +31,7 @@ type Elasticsearch struct { Username string 
`config:"username"`
 	Password      string            `config:"password"`
 	APIKey        string            `config:"api_key"`
+	ServiceToken  string            `config:"service_token"`
 	ProxyURL      string            `config:"proxy_url"`
 	ProxyDisable  bool              `config:"proxy_disable"`
@@ -128,12 +129,13 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) {
 	h.Set("X-elastic-product-origin", "fleet")
 	return elasticsearch.Config{
-		Addresses:  addrs,
-		Username:   c.Username,
-		Password:   c.Password,
-		Header:     h,
-		Transport:  httpTransport,
-		MaxRetries: c.MaxRetries,
+		Addresses:    addrs,
+		Username:     c.Username,
+		Password:     c.Password,
+		ServiceToken: c.ServiceToken,
+		Header:       h,
+		Transport:    httpTransport,
+		MaxRetries:   c.MaxRetries,
 	}, nil
 }

From d6166f9a01e479a50491670861b75079144c7648 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Wed, 14 Apr 2021 20:05:38 +0000
Subject: [PATCH 061/240] Example configuration for minimal RAM. (#241)

(cherry picked from commit b171295e0423067ca4156b10799e1dd31789055b)

Co-authored-by: Sean Cunningham
---
 example/fleet-server-100.yml | 49 ++++++++++++++++++++++++++++++++++++
 1 file changed, 49 insertions(+)
 create mode 100644 example/fleet-server-100.yml

diff --git a/example/fleet-server-100.yml b/example/fleet-server-100.yml
new file mode 100644
index 000000000..d8f8e87ea
--- /dev/null
+++ b/example/fleet-server-100.yml
@@ -0,0 +1,49 @@
+# This sample configuration file demonstrates tweaks to limit the resource usage
+# of a very small (100 agent) installation. Target is 1 CPU, 50MiB RAM.
+
+output:
+  elasticsearch:
+    hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}'
+    username: '${ELASTICSEARCH_USERNAME:elastic}'
+    password: '${ELASTICSEARCH_PASSWORD:changeme}'
+    bulk_flush_max_pending: 8  # Limit the number of pending ES bulk operations
+    bulk_flush_interval: 100ms # Flush ES bulk queues on this interval.
+
+fleet:
+  agent:
+    id: 1e4954ce-af37-4731-9f4a-407b08e69e42 # Normally provided by the agent; stubbed here.
+
+runtime:
+  gc_percent: 20 # Force the GC to execute more frequently: see https://golang.org/pkg/runtime/debug/#SetGCPercent
+
+inputs:
+  - cache:
+      num_counters: 2000 # Limit the size of the hash table to roughly 10x expected number of elements
+      max_cost: 2097152  # Limit the total size of data allowed in the cache, 2 MiB in bytes.
+    server:
+      limits:
+        policy_throttle: 200ms # Roll out a new policy every 200ms; roughly 5 per second.
+        max_connections: 200   # Hard limit on the number of connections accepted; defends TLS connection flood.
+        checkin_limit:
+          interval: 50ms  # Check in no faster than 20 per second.
+          burst: 25       # Allow burst up to 25, then fall back to interval rate.
+          max: 100        # No more than 100 long polls allowed. THIS EFFECTIVELY LIMITS MAX ENDPOINTS.
+        artifact_limit:
+          interval: 100ms # Roll out 10 artifacts per second
+          burst: 10       # Small burst prevents outbound buffer explosion.
+          max: 10         # Only 10 transactions at a time max. This should generally not be a relevant limitation as the transactions are cached.
+        ack_limit:
+          interval: 10ms  # Allow ACK only 100 per second. ACK payload is unbounded in RAM so need to limit.
+          burst: 20       # Allow burst up to 20, then fall back to interval rate.
+          max: 20         # Cannot have too many processing at once due to unbounded payload size.
+        enroll_limit:
+          interval: 100ms # Enroll is both CPU and RAM intensive. Limit to 10 per second.
+          burst: 5        # Allow initial burst, but limit to max.
+          max: 10         # Max limit.
+ ssl: + enabled: true + key: /path/to/key.pem # To support TLS, server needs cert, key pair + certificate: /path/to/cert.pem + +http: + enabled: true # Enable metrics on http://localhost:5066/stats \ No newline at end of file From a2e547c6b45fba98776fd5246ed18de755fc2be2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 15 Apr 2021 14:07:30 +0000 Subject: [PATCH 062/240] Ensure that the connecting Elastic Agent is a supported version. (#239) (#242) * Ensure that the connecting Elastic Agent is a supported version. * Revert change to fleet-server.yml * Fix added space in fleet-server.yml. * Fixes from code review. * Update go.mod. (cherry picked from commit 91af65abcd58c7196b26e382b595e2ea367e1bf3) Co-authored-by: Blake Rouse --- NOTICE.txt | 4076 +++++++++++++++++------------------ cmd/fleet/handleCheckin.go | 11 +- cmd/fleet/handleEnroll.go | 10 +- cmd/fleet/main.go | 19 +- cmd/fleet/metrics.go | 2 +- cmd/fleet/server_test.go | 5 +- cmd/fleet/userAgent.go | 78 + cmd/fleet/userAgent_test.go | 99 + go.mod | 1 + go.sum | 3 +- 10 files changed, 2254 insertions(+), 2050 deletions(-) create mode 100644 cmd/fleet/userAgent.go create mode 100644 cmd/fleet/userAgent_test.go diff --git a/NOTICE.txt b/NOTICE.txt index 3d6c298fc..b90a30a41 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -1345,100 +1345,100 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/golang-lru -Version: v0.5.2-0.20190520140433-59383c442f7d +Dependency : github.com/hashicorp/go-version +Version: v1.3.0 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/golang-lru@v0.5.2-0.20190520140433-59383c442f7d/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-version@v1.3.0/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. "Contributor" +1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. "Contributor Version" +1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + Contributor and that particular Contributor’s Contribution. -1.3. "Contribution" +1.3. “Contribution” means Covered Software of a particular Contributor. -1.4. "Covered Software" +1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. "Incompatible With Secondary Licenses" +1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -1.6. "Executable Form" +1.6. “Executable Form” means any form of the work other than Source Code Form. -1.7. "Larger Work" +1.7. 
“Larger Work” - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -1.8. "License" +1.8. “License” means this document. -1.9. "Licensable" +1.9. “Licensable” - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -1.10. "Modifications" +1.10. “Modifications” means any of the following: - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. "Patent Claims" of a Contributor +1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -1.12. "Secondary License" +1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. "Source Code Form" +1.13. “Source Code Form” means the form of the work preferred for making modifications. -1.14. "You" (or "Your") +1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is + License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause + definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -1454,59 +1454,57 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's + b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions @@ -1519,12 +1517,11 @@ Mozilla Public License, version 2.0 3.1. 
Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form @@ -1536,40 +1533,39 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). 3.4. Notices - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. 
+ You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -1578,14 +1574,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. 5. Termination @@ -1593,22 +1589,21 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. 
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -1617,16 +1612,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. 7. Limitation of Liability @@ -1638,29 +1633,27 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. 
Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. - -9. Miscellaneous - - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. 10. Versions of the License @@ -1674,24 +1667,23 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -10.4. 
Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice @@ -1702,382 +1694,445 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. -Exhibit B - "Incompatible With Secondary Licenses" Notice +Exhibit B - “Incompatible With Secondary Licenses” Notice - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. + -------------------------------------------------------------------------------- -Dependency : github.com/julienschmidt/httprouter -Version: v1.3.0 -Licence type (autodetected): BSD-3-Clause +Dependency : github.com/hashicorp/golang-lru +Version: v0.5.2-0.20190520140433-59383c442f7d +Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/julienschmidt/httprouter@v1.3.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/golang-lru@v0.5.2-0.20190520140433-59383c442f7d/LICENSE: -BSD 3-Clause License +Mozilla Public License, version 2.0 -Copyright (c) 2013, Julien Schmidt -All rights reserved. +1. Definitions -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: +1.1. "Contributor" -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. -2. Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. +1.2. "Contributor Version" -3. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +1.3. "Contribution" + means Covered Software of a particular Contributor. --------------------------------------------------------------------------------- -Dependency : github.com/miolini/datacounter -Version: v1.0.2 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- +1.4. "Covered Software" -Contents of probable licence file $GOMODCACHE/github.com/miolini/datacounter@v1.0.2/LICENSE: + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. -The MIT License (MIT) +1.5. "Incompatible With Secondary Licenses" + means -Copyright (c) 2015 Artem Andreenko + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +1.6. "Executable Form" -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + means any form of the work other than Source Code Form. +1.7. "Larger Work" + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. --------------------------------------------------------------------------------- -Dependency : github.com/pkg/errors -Version: v0.9.1 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- +1.8. "License" -Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: + means this document. 
-Copyright (c) 2015, Dave Cheney -All rights reserved. +1.9. "Licensable" -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. +1.10. "Modifications" -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. + means any of the following: -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + b. any new file in Source Code Form that contains any Covered Software. --------------------------------------------------------------------------------- -Dependency : github.com/rs/xid -Version: v1.2.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- +1.11. "Patent Claims" of a Contributor -Contents of probable licence file $GOMODCACHE/github.com/rs/xid@v1.2.1/LICENSE: + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. -Copyright (c) 2015 Olivier Poitrey +1.12. "Secondary License" -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is furnished -to do so, subject to the following conditions: + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +1.13. 
"Source Code Form" -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. + means the form of the work preferred for making modifications. +1.14. "You" (or "Your") --------------------------------------------------------------------------------- -Dependency : github.com/rs/zerolog -Version: v1.19.0 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. -Contents of probable licence file $GOMODCACHE/github.com/rs/zerolog@v1.19.0/LICENSE: -MIT License +2. License Grants and Conditions -Copyright (c) 2017 Olivier Poitrey +2.1. Grants -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. +2.2. 
Effective Date --------------------------------------------------------------------------------- -Dependency : github.com/spf13/cobra -Version: v0.0.5 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/spf13/cobra@v0.0.5/LICENSE.txt: + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +2.3. Limitations on Grant Scope - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: - 1. Definitions. + a. for any code that a Contributor has removed from Covered Software; or - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. + b. for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. +2.4. Subsequent Licenses - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. +2.5. Representation - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). 
+ Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. +2.6. Fair Use - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. +2.7. Conditions - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: +3. Responsibilities - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and +3.1. Distribution of Source Form - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and +3.2. Distribution of Executable Form - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. + If You distribute Covered Software in Executable Form then: - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. + a. such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. + b. 
You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. +3.3. Distribution of a Larger Work - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. +3.4. Notices - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
+ You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. +3.5. Application of Additional Terms --------------------------------------------------------------------------------- -Dependency : github.com/stretchr/testify -Version: v1.6.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. -Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.6.1/LICENSE: +4. Inability to Comply Due to Statute or Regulation -MIT License + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. -Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. +5. Termination -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. 
If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. 
Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. + + +-------------------------------------------------------------------------------- +Dependency : github.com/julienschmidt/httprouter +Version: v1.3.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/julienschmidt/httprouter@v1.3.0/LICENSE: + +BSD 3-Clause License + +Copyright (c) 2013, Julien Schmidt +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +1. Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +2. Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +3. Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/miolini/datacounter +Version: v1.0.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/miolini/datacounter@v1.0.2/LICENSE: + +The MIT License (MIT) + +Copyright (c) 2015 Artem Andreenko + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is @@ -2095,25 +2150,59 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + -------------------------------------------------------------------------------- -Dependency : go.uber.org/zap -Version: v1.14.0 +Dependency : github.com/pkg/errors +Version: v0.9.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pkg/errors@v0.9.1/LICENSE: + +Copyright (c) 2015, Dave Cheney +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +-------------------------------------------------------------------------------- +Dependency : github.com/rs/xid +Version: v1.2.1 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/rs/xid@v1.2.1/LICENSE: -Copyright (c) 2016-2017 Uber Technologies, Inc. +Copyright (c) 2015 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +copies of the Software, and to permit persons to whom the Software is furnished +to do so, subject to the following conditions: -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, @@ -2125,162 +2214,16 @@ THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : golang.org/x/net -Version: v0.0.0-20200822124328-c89045814202 -Licence type (autodetected): BSD-3-Clause +Dependency : github.com/rs/zerolog +Version: v1.19.0 +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. +Contents of probable licence file $GOMODCACHE/github.com/rs/zerolog@v1.19.0/LICENSE: -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: +MIT License - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : golang.org/x/sync -Version: v0.0.0-20200625203802-6e8e738ad208 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.0.0-20200625203802-6e8e738ad208/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : golang.org/x/time -Version: v0.0.0-20200630173020-3af7569d3a1e -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20200630173020-3af7569d3a1e/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - - - -================================================================================ -Indirect dependencies - - --------------------------------------------------------------------------------- -Dependency : 4d63.com/embedfiles -Version: v0.0.0-20190311033909-995e0740726f -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/4d63.com/embedfiles@v0.0.0-20190311033909-995e0740726f/LICENSE: - -Copyright (c) 2017, Leigh McCulloch - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : 4d63.com/tz -Version: v1.1.1-0.20191124060701-6d37baae851b -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/4d63.com/tz@v1.1.1-0.20191124060701-6d37baae851b/LICENSE: - -MIT License - -Copyright (c) 2018 Leigh McCulloch +Copyright (c) 2017 Olivier Poitrey Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -2300,49 +2243,16 @@ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- - -zoneinfo.go generated from /lib/time/zoneinfo.zip from Go. - -Copyright (c) 2009 The Go Authors. All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go -Version: v0.51.0 +Dependency : github.com/spf13/cobra +Version: v0.0.5 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.51.0/LICENSE: - +Contents of probable licence file $GOMODCACHE/github.com/spf13/cobra@v0.0.5/LICENSE.txt: - Apache License + Apache License Version 2.0, January 2004 http://www.apache.org/licenses/ @@ -2517,58 +2427,300 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.51.0/LICENS incurred by, or claims asserted against, such Contributor by reason of your accepting any such warranty or additional liability. - END OF TERMS AND CONDITIONS - APPENDIX: How to apply the Apache License to your work. +-------------------------------------------------------------------------------- +Dependency : github.com/stretchr/testify +Version: v1.6.1 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. +Contents of probable licence file $GOMODCACHE/github.com/stretchr/testify@v1.6.1/LICENSE: - Copyright [yyyy] [name of copyright owner] +MIT License - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at +Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors. 
- http://www.apache.org/licenses/LICENSE-2.0 +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/bigquery -Version: v1.0.1 -Licence type (autodetected): Apache-2.0 +Dependency : go.uber.org/zap +Version: v1.14.0 +Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/go.uber.org/zap@v1.14.0/LICENSE.txt: +Copyright (c) 2016-2017 Uber Technologies, Inc. - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. - 1. Definitions. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
+-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.0.0-20200822124328-c89045814202 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- - "Legal Entity" shall mean the union of the acting entity and all +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/sync +Version: v0.0.0-20200625203802-6e8e738ad208 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/sync@v0.0.0-20200625203802-6e8e738ad208/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : golang.org/x/time +Version: v0.0.0-20200630173020-3af7569d3a1e +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/time@v0.0.0-20200630173020-3af7569d3a1e/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + + + +================================================================================ +Indirect dependencies + + +-------------------------------------------------------------------------------- +Dependency : 4d63.com/embedfiles +Version: v0.0.0-20190311033909-995e0740726f +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/4d63.com/embedfiles@v0.0.0-20190311033909-995e0740726f/LICENSE: + +Copyright (c) 2017, Leigh McCulloch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : 4d63.com/tz +Version: v1.1.1-0.20191124060701-6d37baae851b +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/4d63.com/tz@v1.1.1-0.20191124060701-6d37baae851b/LICENSE: + +MIT License + +Copyright (c) 2018 Leigh McCulloch + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + +-------------------------------------------------------------------------------- + +zoneinfo.go generated from /lib/time/zoneinfo.zip from Go. + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : cloud.google.com/go +Version: v0.51.0 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/cloud.google.com/go@v0.51.0/LICENSE: + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all other entities that control, are controlled by, or are under common control with that entity. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the @@ -2758,12 +2910,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.0. -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/datastore -Version: v1.0.0 +Dependency : cloud.google.com/go/bigquery +Version: v1.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datastore@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/bigquery@v1.0.1/LICENSE: Apache License @@ -2970,12 +3122,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datastore@v1.0 -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/pubsub -Version: v1.0.1 +Dependency : cloud.google.com/go/datastore +Version: v1.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/datastore@v1.0.0/LICENSE: Apache License @@ -3182,12 +3334,12 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.0.1/ -------------------------------------------------------------------------------- -Dependency : cloud.google.com/go/storage -Version: v1.0.0 +Dependency : cloud.google.com/go/pubsub +Version: v1.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/pubsub@v1.0.1/LICENSE: Apache License @@ -3394,12 +3546,13 @@ Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.0.0 
-------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/go-diodes -Version: v0.0.0-20190809170250-f77fb823c7ee +Dependency : cloud.google.com/go/storage +Version: v1.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: +Contents of probable licence file $GOMODCACHE/cloud.google.com/go/storage@v1.0.0/LICENSE: + Apache License Version 2.0, January 2004 @@ -3603,10 +3756,221 @@ Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0 See the License for the specific language governing permissions and limitations under the License. + -------------------------------------------------------------------------------- -Dependency : code.cloudfoundry.org/go-loggregator -Version: v7.4.0+incompatible -Licence type (autodetected): Apache-2.0 +Dependency : code.cloudfoundry.org/go-diodes +Version: v0.0.0-20190809170250-f77fb823c7ee +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-diodes@v0.0.0-20190809170250-f77fb823c7ee/LICENSE: + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------------------------------------- +Dependency : code.cloudfoundry.org/go-loggregator +Version: v7.4.0+incompatible +Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- Contents of probable licence file $GOMODCACHE/code.cloudfoundry.org/go-loggregator@v7.4.0+incompatible/LICENSE: @@ -20091,329 +20455,42 @@ in accepting such obligations, You may act only on Your own behalf and on Your sole responsibility, not on behalf of any other Contributor, and only if You agree to indemnify, defend, and hold each Contributor harmless for any liability incurred by, or claims asserted against, such Contributor by reason of your -accepting any such warranty or additional liability. 
- -END OF TERMS AND CONDITIONS - -APPENDIX: How to apply the Apache License to your work - -To apply the Apache License to your work, attach the following boilerplate -notice, with the fields enclosed by brackets "[]" replaced with your own -identifying information. (Don't include the brackets!) The text should be -enclosed in the appropriate comment syntax for the file format. We also -recommend that a file or class name and description of purpose be included on -the same "printed page" as the copyright notice for easier identification within -third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/golang/mock -Version: v1.3.1 -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/golang/mock@v1.3.1/LICENSE: - - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. 
For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. 
You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. 
In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/golang/protobuf -Version: v1.4.2 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.4.2/LICENSE: - -Copyright 2010 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. 
nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - +accepting any such warranty or additional liability. +END OF TERMS AND CONDITIONS --------------------------------------------------------------------------------- -Dependency : github.com/golang/snappy -Version: v0.0.1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- +APPENDIX: How to apply the Apache License to your work -Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.1/LICENSE: +To apply the Apache License to your work, attach the following boilerplate +notice, with the fields enclosed by brackets "[]" replaced with your own +identifying information. (Don't include the brackets!) The text should be +enclosed in the appropriate comment syntax for the file format. We also +recommend that a file or class name and description of purpose be included on +the same "printed page" as the copyright notice for easier identification within +third-party archives. -Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + Copyright [yyyy] [name of copyright owner] -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. + http://www.apache.org/licenses/LICENSE-2.0 -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/google/btree -Version: v1.0.0 +Dependency : github.com/golang/mock +Version: v1.3.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/btree@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/golang/mock@v1.3.1/LICENSE: Apache License @@ -20620,12 +20697,87 @@ Contents of probable licence file $GOMODCACHE/github.com/google/btree@v1.0.0/LIC -------------------------------------------------------------------------------- -Dependency : github.com/google/flatbuffers -Version: v1.7.2-0.20170925184458-7a6b2bf521e9 +Dependency : github.com/golang/protobuf +Version: v1.4.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/protobuf@v1.4.2/LICENSE: + +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + + +-------------------------------------------------------------------------------- +Dependency : github.com/golang/snappy +Version: v0.0.1 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/golang/snappy@v0.0.1/LICENSE: + +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/btree +Version: v1.0.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v1.7.2-0.20170925184458-7a6b2bf521e9/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/google/btree@v1.0.0/LICENSE: Apache License @@ -20816,7 +20968,7 @@ Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v1.7 same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2014 Google Inc. + Copyright [yyyy] [name of copyright owner] Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -20824,131 +20976,20 @@ Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v1.7 http://www.apache.org/licenses/LICENSE-2.0 - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/google/go-github/v28 -Version: v28.1.1 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/go-github/v28@v28.1.1/LICENSE: - -Copyright (c) 2013 The go-github AUTHORS. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/google/go-github/v29 -Version: v29.0.2 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/go-github/v29@v29.0.2/LICENSE: - -Copyright (c) 2013 The go-github AUTHORS. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/google/go-querystring -Version: v1.0.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/go-querystring@v1.0.0/LICENSE: - -Copyright (c) 2013 Google. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + -------------------------------------------------------------------------------- -Dependency : github.com/google/gofuzz -Version: v1.1.0 +Dependency : github.com/google/flatbuffers +Version: v1.7.2-0.20170925184458-7a6b2bf521e9 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/flatbuffers@v1.7.2-0.20170925184458-7a6b2bf521e9/LICENSE.txt: Apache License @@ -21139,7 +21180,7 @@ Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LI same "printed page" as the copyright notice for easier identification within third-party archives. 
- Copyright [yyyy] [name of copyright owner] + Copyright 2014 Google Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. @@ -21155,15 +21196,14 @@ Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LI -------------------------------------------------------------------------------- -Dependency : github.com/google/gopacket -Version: v1.1.18-0.20191009163724-0ad7f2610e34 +Dependency : github.com/google/go-github/v28 +Version: v28.1.1 Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/gopacket@v1.1.18-0.20191009163724-0ad7f2610e34/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/go-github/v28@v28.1.1/LICENSE: -Copyright (c) 2012 Google, Inc. All rights reserved. -Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -21175,7 +21215,7 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - * Neither the name of Andreas Krennmair, Google, nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. @@ -21193,12 +21233,86 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- -Dependency : github.com/google/licenseclassifier -Version: v0.0.0-20200402202327-879cb1424de0 +Dependency : github.com/google/go-github/v29 +Version: v29.0.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-github/v29@v29.0.2/LICENSE: + +Copyright (c) 2013 The go-github AUTHORS. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/go-querystring +Version: v1.0.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/go-querystring@v1.0.0/LICENSE: + +Copyright (c) 2013 Google. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/gofuzz +Version: v1.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/gofuzz@v1.1.0/LICENSE: Apache License @@ -21405,12 +21519,50 @@ Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifie -------------------------------------------------------------------------------- -Dependency : github.com/google/martian -Version: v2.1.0+incompatible +Dependency : github.com/google/gopacket +Version: v1.1.18-0.20191009163724-0ad7f2610e34 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/google/gopacket@v1.1.18-0.20191009163724-0ad7f2610e34/LICENSE: + +Copyright (c) 2012 Google, Inc. 
All rights reserved. +Copyright (c) 2009-2011 Andreas Krennmair. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Andreas Krennmair, Google, nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/google/licenseclassifier +Version: v0.0.0-20200402202327-879cb1424de0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/martian@v2.1.0+incompatible/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/licenseclassifier@v0.0.0-20200402202327-879cb1424de0/LICENSE: Apache License @@ -21617,12 +21769,12 @@ Contents of probable licence file $GOMODCACHE/github.com/google/martian@v2.1.0+i -------------------------------------------------------------------------------- -Dependency : github.com/google/pprof -Version: v0.0.0-20191218002539-d4f498aebedc +Dependency : github.com/google/martian +Version: v2.1.0+incompatible Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-20191218002539-d4f498aebedc/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/martian@v2.1.0+incompatible/LICENSE: Apache License @@ -21829,12 +21981,12 @@ Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-201 -------------------------------------------------------------------------------- -Dependency : github.com/google/renameio -Version: v0.1.0 +Dependency : github.com/google/pprof +Version: v0.0.0-20191218002539-d4f498aebedc Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/renameio@v0.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/pprof@v0.0.0-20191218002539-d4f498aebedc/LICENSE: Apache License @@ -22041,12 +22193,12 @@ Contents of probable licence file 
$GOMODCACHE/github.com/google/renameio@v0.1.0/ -------------------------------------------------------------------------------- -Dependency : github.com/google/subcommands -Version: v1.0.1 +Dependency : github.com/google/renameio +Version: v0.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/google/subcommands@v1.0.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/renameio@v0.1.0/LICENSE: Apache License @@ -22235,104 +22387,30 @@ Contents of probable licence file $GOMODCACHE/github.com/google/subcommands@v1.0 comment syntax for the file format. We also recommend that a file or class name and description of purpose be included on the same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - - --------------------------------------------------------------------------------- -Dependency : github.com/google/uuid -Version: v1.1.2-0.20190416172445-c2e93f3ae59f -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.1.2-0.20190416172445-c2e93f3ae59f/LICENSE: - -Copyright (c) 2009,2014 Google Inc. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- - --------------------------------------------------------------------------------- -Dependency : github.com/googleapis/gax-go/v2 -Version: v2.0.5 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gax-go/v2@v2.0.5/LICENSE: - -Copyright 2016, Google Inc. -All rights reserved. -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -------------------------------------------------------------------------------- -Dependency : github.com/googleapis/gnostic -Version: v0.4.1 +Dependency : github.com/google/subcommands +Version: v1.0.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/subcommands@v1.0.1/LICENSE: Apache License @@ -22538,16 +22616,15 @@ Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4 limitations under the License. 
- -------------------------------------------------------------------------------- -Dependency : github.com/gopherjs/gopherjs -Version: v0.0.0-20181017120253-0766667cb4d1 -Licence type (autodetected): BSD-2-Clause +Dependency : github.com/google/uuid +Version: v1.1.2-0.20190416172445-c2e93f3ae59f +Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/gopherjs/gopherjs@v0.0.0-20181017120253-0766667cb4d1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/google/uuid@v1.1.2-0.20190416172445-c2e93f3ae59f/LICENSE: -Copyright (c) 2013 Richard Musiol. All rights reserved. +Copyright (c) 2009,2014 Google Inc. All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are @@ -22559,586 +22636,478 @@ notice, this list of conditions and the following disclaimer. copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/gorhill/cronexpr -Version: v0.0.0-20161205141322-d520615e531a -Licence type (autodetected): Apache-2.0 --------------------------------------------------------------------------------- - -No licence file provided. - --------------------------------------------------------------------------------- -Dependency : github.com/gorilla/mux -Version: v1.7.2 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.7.2/LICENSE: - -Copyright (c) 2012-2018 The Gorilla Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its + * Neither the name of Google Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/gorilla/websocket -Version: v1.4.1 -Licence type (autodetected): BSD-2-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gorilla/websocket@v1.4.1/LICENSE: - -Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - - Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - - Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/gregjones/httpcache -Version: v0.0.0-20180305231024-9cad4c3443a7 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/gregjones/httpcache@v0.0.0-20180305231024-9cad4c3443a7/LICENSE.txt: - -Copyright © 2012 Greg Jones (greg.jones@gmail.com) - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - --------------------------------------------------------------------------------- -Dependency : github.com/grpc-ecosystem/grpc-gateway -Version: v1.13.0 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/grpc-ecosystem/grpc-gateway@v1.13.0/LICENSE.txt: - -Copyright (c) 2015, Gengo, Inc. -All rights reserved. - -Redistribution and use in source and binary forms, with or without modification, -are permitted provided that the following conditions are met: - - * Redistributions of source code must retain the above copyright notice, - this list of conditions and the following disclaimer. - - * Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - - * Neither the name of Gengo, Inc. nor the names of its - contributors may be used to endorse or promote products derived from this - software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR -ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON -ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - --------------------------------------------------------------------------------- -Dependency : github.com/h2non/filetype -Version: v1.1.1-0.20201130172452-f60988ab73d5 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: - -The MIT License - -Copyright (c) Tomas Aparicio - -Permission is hereby granted, free of charge, to any person -obtaining a copy of this software and associated documentation -files (the "Software"), to deal in the Software without -restriction, including without limitation the rights to use, -copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the -Software is furnished to do so, subject to the following -conditions: - -The above copyright notice and this permission notice shall be -included in all copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -OTHER DEALINGS IN THE SOFTWARE. - - --------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/errwrap -Version: v1.0.0 -Licence type (autodetected): MPL-2.0 --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/errwrap@v1.0.0/LICENSE: - -Mozilla Public License, version 2.0 - -1. Definitions - -1.1. “Contributor” - - means each individual or legal entity that creates, contributes to the - creation of, or owns Covered Software. - -1.2. “Contributor Version” - - means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. - -1.3. “Contribution” - - means Covered Software of a particular Contributor. - -1.4. “Covered Software” - - means Source Code Form to which the initial Contributor has attached the - notice in Exhibit A, the Executable Form of such Source Code Form, and - Modifications of such Source Code Form, in each case including portions - thereof. - -1.5. “Incompatible With Secondary Licenses” - means - - a. that the initial Contributor has attached the notice described in - Exhibit B to the Covered Software; or - - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. - -1.6. “Executable Form” - - means any form of the work other than Source Code Form. - -1.7. “Larger Work” +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. -1.8. “License” +-------------------------------------------------------------------------------- +Dependency : github.com/googleapis/gax-go/v2 +Version: v2.0.5 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- - means this document. +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gax-go/v2@v2.0.5/LICENSE: -1.9. “Licensable” +Copyright 2016, Google Inc. +All rights reserved. 
+Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -1.10. “Modifications” +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or +-------------------------------------------------------------------------------- +Dependency : github.com/googleapis/gnostic +Version: v0.4.1 +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- - b. any new file in Source Code Form that contains any Covered Software. +Contents of probable licence file $GOMODCACHE/github.com/googleapis/gnostic@v0.4.1/LICENSE: -1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ -1.12. “Secondary License” + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - means either the GNU General Public License, Version 2.0, the GNU Lesser - General Public License, Version 2.1, the GNU Affero General Public - License, Version 3.0, or any later versions of those licenses. + 1. Definitions. -1.13. “Source Code Form” + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. - means the form of the work preferred for making modifications. + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. -1.14. 
“You” (or “Your”) + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. - means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is - controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause - the direction or management of such entity, whether by contract or - otherwise, or (b) ownership of more than fifty percent (50%) of the - outstanding shares or beneficial ownership of such entity. + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. -2. License Grants and Conditions + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. -2.1. Grants + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). - Each Contributor hereby grants You a world-wide, royalty-free, - non-exclusive license: + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. - a. under intellectual property rights (other than patent or trademark) - Licensable by such Contributor to use, reproduce, make available, - modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." - b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. -2.2. Effective Date + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. -2.3. Limitations on Grant Scope + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and - a. for any code that a Contributor has removed from Covered Software; or + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and - b. 
for infringements caused by: (i) Your and any other third party’s - modifications of Covered Software, or (ii) the combination of its - Contributions with other software (except as part of its Contributor - Version); or + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. -2.4. Subsequent Licenses + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. - No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. -2.5. Representation + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. -2.6. Fair Use + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + END OF TERMS AND CONDITIONS -2.7. Conditions + APPENDIX: How to apply the Apache License to your work. - Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in - Section 2.1. + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + Copyright [yyyy] [name of copyright owner] -3. Responsibilities + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at -3.1. Distribution of Source Form + http://www.apache.org/licenses/LICENSE-2.0 - All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. 
You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. -3.2. Distribution of Executable Form - If You distribute Covered Software in Executable Form then: - a. such Covered Software must also be made available in Source Code Form, - as described in Section 3.1, and You must inform recipients of the - Executable Form how they can obtain a copy of such Source Code Form by - reasonable means in a timely manner, at a charge no more than the cost - of distribution to the recipient; and +-------------------------------------------------------------------------------- +Dependency : github.com/gopherjs/gopherjs +Version: v0.0.0-20181017120253-0766667cb4d1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. +Contents of probable licence file $GOMODCACHE/github.com/gopherjs/gopherjs@v0.0.0-20181017120253-0766667cb4d1/LICENSE: -3.3. Distribution of a Larger Work +Copyright (c) 2013 Richard Musiol. All rights reserved. - You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -3.4. Notices + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -3.5. Application of Additional Terms - You may choose to offer, and to charge a fee for, warranty, support, - indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any - liability incurred by such Contributor as a result of warranty, support, - indemnity or liability terms You offer. You may include additional - disclaimers of warranty and limitations of liability specific to any - jurisdiction. +-------------------------------------------------------------------------------- +Dependency : github.com/gorhill/cronexpr +Version: v0.0.0-20161205141322-d520615e531a +Licence type (autodetected): Apache-2.0 +-------------------------------------------------------------------------------- -4. Inability to Comply Due to Statute or Regulation +No licence file provided. - If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/mux +Version: v1.7.2 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- -5. Termination +Contents of probable licence file $GOMODCACHE/github.com/gorilla/mux@v1.7.2/LICENSE: -5.1. The rights granted under this License will terminate automatically if You - fail to comply with any of its terms. However, if You become compliant, - then the rights granted under this License from a particular Contributor - are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. +Copyright (c) 2012-2018 The Gorilla Authors. 
All rights reserved. -5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: -5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user - license agreements (excluding distributors and resellers) which have been - validly granted by You or Your distributors under this License prior to - termination shall survive termination. + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. -6. Disclaimer of Warranty +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. -7. 
Limitation of Liability +-------------------------------------------------------------------------------- +Dependency : github.com/gorilla/websocket +Version: v1.4.1 +Licence type (autodetected): BSD-2-Clause +-------------------------------------------------------------------------------- - Under no circumstances and under no legal theory, whether tort (including - negligence), contract, or otherwise, shall any Contributor, or anyone who - distributes Covered Software as permitted above, be liable to You for any - direct, indirect, special, incidental, or consequential damages of any - character including, without limitation, damages for lost profits, loss of - goodwill, work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses, even if such party shall have been - informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. +Contents of probable licence file $GOMODCACHE/github.com/gorilla/websocket@v1.4.1/LICENSE: -8. Litigation +Copyright (c) 2013 The Gorilla WebSocket Authors. All rights reserved. - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: -9. Miscellaneous + Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -10. Versions of the License -10.1. 
New Versions +-------------------------------------------------------------------------------- +Dependency : github.com/gregjones/httpcache +Version: v0.0.0-20180305231024-9cad4c3443a7 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- - Mozilla Foundation is the license steward. Except as provided in Section - 10.3, no one other than the license steward has the right to modify or - publish new versions of this License. Each version will be given a - distinguishing version number. +Contents of probable licence file $GOMODCACHE/github.com/gregjones/httpcache@v0.0.0-20180305231024-9cad4c3443a7/LICENSE.txt: -10.2. Effect of New Versions +Copyright © 2012 Greg Jones (greg.jones@gmail.com) - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license - steward. +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: -10.3. Modified Versions +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). +THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +-------------------------------------------------------------------------------- +Dependency : github.com/grpc-ecosystem/grpc-gateway +Version: v1.13.0 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- -Exhibit A - Source Code Form License Notice +Contents of probable licence file $GOMODCACHE/github.com/grpc-ecosystem/grpc-gateway@v1.13.0/LICENSE.txt: - This Source Code Form is subject to the - terms of the Mozilla Public License, v. - 2.0. If a copy of the MPL was not - distributed with this file, You can - obtain one at - http://mozilla.org/MPL/2.0/. +Copyright (c) 2015, Gengo, Inc. +All rights reserved. 
-If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: -You may add additional accurate notices of copyright ownership. + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. -Exhibit B - “Incompatible With Secondary Licenses” Notice + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by - the Mozilla Public License, v. 2.0. + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-hclog -Version: v0.9.2 +Dependency : github.com/h2non/filetype +Version: v1.1.1-0.20201130172452-f60988ab73d5 Licence type (autodetected): MIT -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-hclog@v0.9.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/h2non/filetype@v1.1.1-0.20201130172452-f60988ab73d5/LICENSE: -MIT License +The MIT License -Copyright (c) 2017 HashiCorp +Copyright (c) Tomas Aparicio -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: +Permission is hereby granted, free of charge, to any person +obtaining a copy of this software and associated documentation +files (the "Software"), to deal in the Software without +restriction, including without limitation the rights to use, +copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the +Software is furnished to do so, subject to the following +conditions: -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES +OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT +HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING +FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-multierror -Version: v1.1.0 +Dependency : github.com/hashicorp/errwrap +Version: v1.0.0 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-multierror@v1.1.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/errwrap@v1.0.0/LICENSE: Mozilla Public License, version 2.0 @@ -23495,101 +23464,133 @@ Exhibit B - “Incompatible With Secondary Licenses” Notice the Mozilla Public License, v. 2.0. 
+ -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-retryablehttp -Version: v0.6.6 +Dependency : github.com/hashicorp/go-hclog +Version: v0.9.2 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-hclog@v0.9.2/LICENSE: + +MIT License + +Copyright (c) 2017 HashiCorp + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + +-------------------------------------------------------------------------------- +Dependency : github.com/hashicorp/go-multierror +Version: v1.1.0 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-retryablehttp@v0.6.6/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-multierror@v1.1.0/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. "Contributor" +1.1. “Contributor” means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. "Contributor Version" +1.2. “Contributor Version” means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor's Contribution. + Contributor and that particular Contributor’s Contribution. -1.3. "Contribution" +1.3. “Contribution” means Covered Software of a particular Contributor. -1.4. "Covered Software" +1.4. “Covered Software” means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. "Incompatible With Secondary Licenses" +1.5. “Incompatible With Secondary Licenses” means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of - version 1.1 or earlier of the License, but not also under the terms of - a Secondary License. + b. that the Covered Software was made available under the terms of version + 1.1 or earlier of the License, but not also under the terms of a + Secondary License. -1.6. "Executable Form" +1.6. “Executable Form” means any form of the work other than Source Code Form. -1.7. "Larger Work" +1.7. 
“Larger Work” - means a work that combines Covered Software with other material, in a - separate file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a separate + file or files, that is not Covered Software. -1.8. "License" +1.8. “License” means this document. -1.9. "Licensable" +1.9. “Licensable” - means having the right to grant, to the maximum extent possible, whether - at the time of the initial grant or subsequently, any and all of the - rights conveyed by this License. + means having the right to grant, to the maximum extent possible, whether at the + time of the initial grant or subsequently, any and all of the rights conveyed by + this License. -1.10. "Modifications" +1.10. “Modifications” means any of the following: - a. any file in Source Code Form that results from an addition to, - deletion from, or modification of the contents of Covered Software; or + a. any file in Source Code Form that results from an addition to, deletion + from, or modification of the contents of Covered Software; or b. any new file in Source Code Form that contains any Covered Software. -1.11. "Patent Claims" of a Contributor +1.11. “Patent Claims” of a Contributor - means any patent claim(s), including without limitation, method, - process, and apparatus claims, in any patent Licensable by such - Contributor that would be infringed, but for the grant of the License, - by the making, using, selling, offering for sale, having made, import, - or transfer of either its Contributions or its Contributor Version. + means any patent claim(s), including without limitation, method, process, + and apparatus claims, in any patent Licensable by such Contributor that + would be infringed, but for the grant of the License, by the making, + using, selling, offering for sale, having made, import, or transfer of + either its Contributions or its Contributor Version. -1.12. "Secondary License" +1.12. “Secondary License” means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. "Source Code Form" +1.13. “Source Code Form” means the form of the work preferred for making modifications. -1.14. "You" (or "Your") +1.14. “You” (or “Your”) means an individual or a legal entity exercising rights under this - License. For legal entities, "You" includes any entity that controls, is + License. For legal entities, “You” includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, "control" means (a) the power, direct or indirect, to cause + definition, “control” means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -23605,59 +23606,57 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or - as part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or as + part of a Larger Work; and b. 
under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its - Contributions or its Contributor Version. + sale, have made, import, and otherwise transfer either its Contributions + or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution - become effective for each Contribution on the date the Contributor first - distributes such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution become + effective for each Contribution on the date the Contributor first distributes + such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under - this License. No additional rights or licenses will be implied from the - distribution or licensing of Covered Software under this License. - Notwithstanding Section 2.1(b) above, no patent license is granted by a - Contributor: + The licenses granted in this Section 2 are the only rights granted under this + License. No additional rights or licenses will be implied from the distribution + or licensing of Covered Software under this License. Notwithstanding Section + 2.1(b) above, no patent license is granted by a Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party's + b. for infringements caused by: (i) Your and any other third party’s modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of - its Contributions. + c. under Patent Claims infringed by Covered Software in the absence of its + Contributions. - This License does not grant any rights in the trademarks, service marks, - or logos of any Contributor (except as may be necessary to comply with - the notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, or + logos of any Contributor (except as may be necessary to comply with the + notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this - License (see Section 10.2) or under the terms of a Secondary License (if - permitted under the terms of Section 3.3). + distribute the Covered Software under a subsequent version of this License + (see Section 10.2) or under the terms of a Secondary License (if permitted + under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its - Contributions are its original creation(s) or it has sufficient rights to - grant the rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its Contributions + are its original creation(s) or it has sufficient rights to grant the + rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under - applicable copyright doctrines of fair use, fair dealing, or other - equivalents. + This License is not intended to limit any rights You have under applicable + copyright doctrines of fair use, fair dealing, or other equivalents. 2.7. Conditions @@ -23670,12 +23669,11 @@ Mozilla Public License, version 2.0 3.1. 
Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under - the terms of this License. You must inform recipients that the Source - Code Form of the Covered Software is governed by the terms of this - License, and how they can obtain a copy of this License. You may not - attempt to alter or restrict the recipients' rights in the Source Code - Form. + Modifications that You create or to which You contribute, must be under the + terms of this License. You must inform recipients that the Source Code Form + of the Covered Software is governed by the terms of this License, and how + they can obtain a copy of this License. You may not attempt to alter or + restrict the recipients’ rights in the Source Code Form. 3.2. Distribution of Executable Form @@ -23687,40 +23685,39 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this - License, or sublicense it under different terms, provided that the - license for the Executable Form does not attempt to limit or alter the - recipients' rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this License, + or sublicense it under different terms, provided that the license for + the Executable Form does not attempt to limit or alter the recipients’ + rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for - the Covered Software. If the Larger Work is a combination of Covered - Software with a work governed by one or more Secondary Licenses, and the - Covered Software is not Incompatible With Secondary Licenses, this - License permits You to additionally distribute such Covered Software - under the terms of such Secondary License(s), so that the recipient of - the Larger Work may, at their option, further distribute the Covered - Software under the terms of either this License or such Secondary - License(s). + provided that You also comply with the requirements of this License for the + Covered Software. If the Larger Work is a combination of Covered Software + with a work governed by one or more Secondary Licenses, and the Covered + Software is not Incompatible With Secondary Licenses, this License permits + You to additionally distribute such Covered Software under the terms of + such Secondary License(s), so that the recipient of the Larger Work may, at + their option, further distribute the Covered Software under the terms of + either this License or such Secondary License(s). 3.4. Notices - You may not remove or alter the substance of any license notices - (including copyright notices, patent notices, disclaimers of warranty, or - limitations of liability) contained within the Source Code Form of the - Covered Software, except that You may alter any license notices to the - extent required to remedy known factual inaccuracies. 
+ You may not remove or alter the substance of any license notices (including + copyright notices, patent notices, disclaimers of warranty, or limitations + of liability) contained within the Source Code Form of the Covered + Software, except that You may alter any license notices to the extent + required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on - behalf of any Contributor. You must make it absolutely clear that any - such warranty, support, indemnity, or liability obligation is offered by - You alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on behalf + of any Contributor. You must make it absolutely clear that any such + warranty, support, indemnity, or liability obligation is offered by You + alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -23729,14 +23726,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, - judicial order, or regulation then You must: (a) comply with the terms of - this License to the maximum extent possible; and (b) describe the - limitations and the code they affect. Such description must be placed in a - text file included with all distributions of the Covered Software under - this License. Except to the extent prohibited by statute or regulation, - such description must be sufficiently detailed for a recipient of ordinary - skill to be able to understand it. + with respect to some or all of the Covered Software due to statute, judicial + order, or regulation then You must: (a) comply with the terms of this License + to the maximum extent possible; and (b) describe the limitations and the code + they affect. Such description must be placed in a text file included with all + distributions of the Covered Software under this License. Except to the + extent prohibited by statute or regulation, such description must be + sufficiently detailed for a recipient of ordinary skill to be able to + understand it. 5. Termination @@ -23744,22 +23741,21 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing - basis, if such Contributor fails to notify You of the non-compliance by - some reasonable means prior to 60 days after You have come back into - compliance. Moreover, Your grants from a particular Contributor are - reinstated on an ongoing basis if such Contributor notifies You of the - non-compliance by some reasonable means, this is the first time You have - received notice of non-compliance with this License from such - Contributor, and You become compliant prior to 30 days after Your receipt - of the notice. 
+ explicitly and finally terminates Your grants, and (b) on an ongoing basis, + if such Contributor fails to notify You of the non-compliance by some + reasonable means prior to 60 days after You have come back into compliance. + Moreover, Your grants from a particular Contributor are reinstated on an + ongoing basis if such Contributor notifies You of the non-compliance by + some reasonable means, this is the first time You have received notice of + non-compliance with this License from such Contributor, and You become + compliant prior to 30 days after Your receipt of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, - counter-claims, and cross-claims) alleging that a Contributor Version - directly or indirectly infringes any patent, then the rights granted to - You by any and all Contributors for the Covered Software under Section - 2.1 of this License shall terminate. + infringement claim (excluding declaratory judgment actions, counter-claims, + and cross-claims) alleging that a Contributor Version directly or + indirectly infringes any patent, then the rights granted to You by any and + all Contributors for the Covered Software under Section 2.1 of this License + shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -23768,16 +23764,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an "as is" basis, - without warranty of any kind, either expressed, implied, or statutory, - including, without limitation, warranties that the Covered Software is free - of defects, merchantable, fit for a particular purpose or non-infringing. - The entire risk as to the quality and performance of the Covered Software - is with You. Should any Covered Software prove defective in any respect, - You (not any Contributor) assume the cost of any necessary servicing, - repair, or correction. This disclaimer of warranty constitutes an essential - part of this License. No use of any Covered Software is authorized under - this License except under this disclaimer. + Covered Software is provided under this License on an “as is” basis, without + warranty of any kind, either expressed, implied, or statutory, including, + without limitation, warranties that the Covered Software is free of defects, + merchantable, fit for a particular purpose or non-infringing. The entire + risk as to the quality and performance of the Covered Software is with You. + Should any Covered Software prove defective in any respect, You (not any + Contributor) assume the cost of any necessary servicing, repair, or + correction. This disclaimer of warranty constitutes an essential part of this + License. No use of any Covered Software is authorized under this License + except under this disclaimer. 7. Limitation of Liability @@ -23789,29 +23785,27 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from - such party's negligence to the extent applicable law prohibits such - limitation. 
Some jurisdictions do not allow the exclusion or limitation of - incidental or consequential damages, so this exclusion and limitation may - not apply to You. + shall not apply to liability for death or personal injury resulting from such + party’s negligence to the extent applicable law prohibits such limitation. + Some jurisdictions do not allow the exclusion or limitation of incidental or + consequential damages, so this exclusion and limitation may not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts - of a jurisdiction where the defendant maintains its principal place of - business and such litigation shall be governed by laws of that - jurisdiction, without reference to its conflict-of-law provisions. Nothing - in this Section shall prevent a party's ability to bring cross-claims or - counter-claims. + Any litigation relating to this License may be brought only in the courts of + a jurisdiction where the defendant maintains its principal place of business + and such litigation shall be governed by laws of that jurisdiction, without + reference to its conflict-of-law provisions. Nothing in this Section shall + prevent a party’s ability to bring cross-claims or counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject - matter hereof. If any provision of this License is held to be - unenforceable, such provision shall be reformed only to the extent - necessary to make it enforceable. Any law or regulation which provides that - the language of a contract shall be construed against the drafter shall not - be used to construe this License against a Contributor. + This License represents the complete agreement concerning the subject matter + hereof. If any provision of this License is held to be unenforceable, such + provision shall be reformed only to the extent necessary to make it + enforceable. Any law or regulation which provides that the language of a + contract shall be construed against the drafter shall not be used to construe + this License against a Contributor. 10. Versions of the License @@ -23825,24 +23819,23 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version - of the License under which You originally received the Covered Software, - or under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version of + the License under which You originally received the Covered Software, or + under the terms of any subsequent version published by the license steward. 10.3. Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a - modified version of this License if you rename the license and remove - any references to the name of the license steward (except to note that - such modified license differs from this License). + create a new license for such software, you may create and use a modified + version of this License if you rename the license and remove any + references to the name of the license steward (except to note that such + modified license differs from this License). -10.4. 
Distributing Source Code Form that is Incompatible With Secondary - Licenses If You choose to distribute Source Code Form that is - Incompatible With Secondary Licenses under the terms of this version of - the License, the notice described in Exhibit B of this License must be - attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses + If You choose to distribute Source Code Form that is Incompatible With + Secondary Licenses under the terms of this version of the License, the + notice described in Exhibit B of this License must be attached. Exhibit A - Source Code Form License Notice @@ -23853,28 +23846,26 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, -then You may include the notice in a location (such as a LICENSE file in a -relevant directory) where a recipient would be likely to look for such a -notice. +If it is not possible or desirable to put the notice in a particular file, then +You may include the notice in a location (such as a LICENSE file in a relevant +directory) where a recipient would be likely to look for such a notice. You may add additional accurate notices of copyright ownership. -Exhibit B - "Incompatible With Secondary Licenses" Notice +Exhibit B - “Incompatible With Secondary Licenses” Notice - This Source Code Form is "Incompatible - With Secondary Licenses", as defined by + This Source Code Form is “Incompatible + With Secondary Licenses”, as defined by the Mozilla Public License, v. 2.0. - -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-uuid -Version: v1.0.2 +Dependency : github.com/hashicorp/go-retryablehttp +Version: v0.6.6 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-retryablehttp@v0.6.6/LICENSE: Mozilla Public License, version 2.0 @@ -24242,100 +24233,100 @@ Exhibit B - "Incompatible With Secondary Licenses" Notice -------------------------------------------------------------------------------- -Dependency : github.com/hashicorp/go-version -Version: v1.0.0 +Dependency : github.com/hashicorp/go-uuid +Version: v1.0.2 Licence type (autodetected): MPL-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-version@v1.0.0/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/hashicorp/go-uuid@v1.0.2/LICENSE: Mozilla Public License, version 2.0 1. Definitions -1.1. “Contributor” +1.1. "Contributor" means each individual or legal entity that creates, contributes to the creation of, or owns Covered Software. -1.2. “Contributor Version” +1.2. "Contributor Version" means the combination of the Contributions of others (if any) used by a - Contributor and that particular Contributor’s Contribution. + Contributor and that particular Contributor's Contribution. -1.3. “Contribution” +1.3. "Contribution" means Covered Software of a particular Contributor. -1.4. “Covered Software” +1.4. 
"Covered Software" means Source Code Form to which the initial Contributor has attached the notice in Exhibit A, the Executable Form of such Source Code Form, and Modifications of such Source Code Form, in each case including portions thereof. -1.5. “Incompatible With Secondary Licenses” +1.5. "Incompatible With Secondary Licenses" means a. that the initial Contributor has attached the notice described in Exhibit B to the Covered Software; or - b. that the Covered Software was made available under the terms of version - 1.1 or earlier of the License, but not also under the terms of a - Secondary License. + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. -1.6. “Executable Form” +1.6. "Executable Form" means any form of the work other than Source Code Form. -1.7. “Larger Work” +1.7. "Larger Work" - means a work that combines Covered Software with other material, in a separate - file or files, that is not Covered Software. + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. -1.8. “License” +1.8. "License" means this document. -1.9. “Licensable” +1.9. "Licensable" - means having the right to grant, to the maximum extent possible, whether at the - time of the initial grant or subsequently, any and all of the rights conveyed by - this License. + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. -1.10. “Modifications” +1.10. "Modifications" means any of the following: - a. any file in Source Code Form that results from an addition to, deletion - from, or modification of the contents of Covered Software; or - - b. any new file in Source Code Form that contains any Covered Software. - -1.11. “Patent Claims” of a Contributor + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or - means any patent claim(s), including without limitation, method, process, - and apparatus claims, in any patent Licensable by such Contributor that - would be infringed, but for the grant of the License, by the making, - using, selling, offering for sale, having made, import, or transfer of - either its Contributions or its Contributor Version. + b. any new file in Source Code Form that contains any Covered Software. -1.12. “Secondary License” +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" means either the GNU General Public License, Version 2.0, the GNU Lesser General Public License, Version 2.1, the GNU Affero General Public License, Version 3.0, or any later versions of those licenses. -1.13. “Source Code Form” +1.13. "Source Code Form" means the form of the work preferred for making modifications. -1.14. “You” (or “Your”) +1.14. "You" (or "Your") means an individual or a legal entity exercising rights under this - License. For legal entities, “You” includes any entity that controls, is + License. 
For legal entities, "You" includes any entity that controls, is controlled by, or is under common control with You. For purposes of this - definition, “control” means (a) the power, direct or indirect, to cause + definition, "control" means (a) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (b) ownership of more than fifty percent (50%) of the outstanding shares or beneficial ownership of such entity. @@ -24351,57 +24342,59 @@ Mozilla Public License, version 2.0 a. under intellectual property rights (other than patent or trademark) Licensable by such Contributor to use, reproduce, make available, modify, display, perform, distribute, and otherwise exploit its - Contributions, either on an unmodified basis, with Modifications, or as - part of a Larger Work; and + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and b. under Patent Claims of such Contributor to make, use, sell, offer for - sale, have made, import, and otherwise transfer either its Contributions - or its Contributor Version. + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. 2.2. Effective Date - The licenses granted in Section 2.1 with respect to any Contribution become - effective for each Contribution on the date the Contributor first distributes - such Contribution. + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. 2.3. Limitations on Grant Scope - The licenses granted in this Section 2 are the only rights granted under this - License. No additional rights or licenses will be implied from the distribution - or licensing of Covered Software under this License. Notwithstanding Section - 2.1(b) above, no patent license is granted by a Contributor: + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: a. for any code that a Contributor has removed from Covered Software; or - b. for infringements caused by: (i) Your and any other third party’s + b. for infringements caused by: (i) Your and any other third party's modifications of Covered Software, or (ii) the combination of its Contributions with other software (except as part of its Contributor Version); or - c. under Patent Claims infringed by Covered Software in the absence of its - Contributions. + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. - This License does not grant any rights in the trademarks, service marks, or - logos of any Contributor (except as may be necessary to comply with the - notice requirements in Section 3.4). + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). 2.4. Subsequent Licenses No Contributor makes additional grants as a result of Your choice to - distribute the Covered Software under a subsequent version of this License - (see Section 10.2) or under the terms of a Secondary License (if permitted - under the terms of Section 3.3). 
+ distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). 2.5. Representation - Each Contributor represents that the Contributor believes its Contributions - are its original creation(s) or it has sufficient rights to grant the - rights to its Contributions conveyed by this License. + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. 2.6. Fair Use - This License is not intended to limit any rights You have under applicable - copyright doctrines of fair use, fair dealing, or other equivalents. + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. 2.7. Conditions @@ -24414,11 +24407,12 @@ Mozilla Public License, version 2.0 3.1. Distribution of Source Form All distribution of Covered Software in Source Code Form, including any - Modifications that You create or to which You contribute, must be under the - terms of this License. You must inform recipients that the Source Code Form - of the Covered Software is governed by the terms of this License, and how - they can obtain a copy of this License. You may not attempt to alter or - restrict the recipients’ rights in the Source Code Form. + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. 3.2. Distribution of Executable Form @@ -24430,39 +24424,40 @@ Mozilla Public License, version 2.0 reasonable means in a timely manner, at a charge no more than the cost of distribution to the recipient; and - b. You may distribute such Executable Form under the terms of this License, - or sublicense it under different terms, provided that the license for - the Executable Form does not attempt to limit or alter the recipients’ - rights in the Source Code Form under this License. + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. 3.3. Distribution of a Larger Work You may create and distribute a Larger Work under terms of Your choice, - provided that You also comply with the requirements of this License for the - Covered Software. If the Larger Work is a combination of Covered Software - with a work governed by one or more Secondary Licenses, and the Covered - Software is not Incompatible With Secondary Licenses, this License permits - You to additionally distribute such Covered Software under the terms of - such Secondary License(s), so that the recipient of the Larger Work may, at - their option, further distribute the Covered Software under the terms of - either this License or such Secondary License(s). + provided that You also comply with the requirements of this License for + the Covered Software. 
If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). 3.4. Notices - You may not remove or alter the substance of any license notices (including - copyright notices, patent notices, disclaimers of warranty, or limitations - of liability) contained within the Source Code Form of the Covered - Software, except that You may alter any license notices to the extent - required to remedy known factual inaccuracies. + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. 3.5. Application of Additional Terms You may choose to offer, and to charge a fee for, warranty, support, indemnity or liability obligations to one or more recipients of Covered - Software. However, You may do so only on Your own behalf, and not on behalf - of any Contributor. You must make it absolutely clear that any such - warranty, support, indemnity, or liability obligation is offered by You - alone, and You hereby agree to indemnify every Contributor for any + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any liability incurred by such Contributor as a result of warranty, support, indemnity or liability terms You offer. You may include additional disclaimers of warranty and limitations of liability specific to any @@ -24471,14 +24466,14 @@ Mozilla Public License, version 2.0 4. Inability to Comply Due to Statute or Regulation If it is impossible for You to comply with any of the terms of this License - with respect to some or all of the Covered Software due to statute, judicial - order, or regulation then You must: (a) comply with the terms of this License - to the maximum extent possible; and (b) describe the limitations and the code - they affect. Such description must be placed in a text file included with all - distributions of the Covered Software under this License. Except to the - extent prohibited by statute or regulation, such description must be - sufficiently detailed for a recipient of ordinary skill to be able to - understand it. + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. 5. 
Termination @@ -24486,21 +24481,22 @@ Mozilla Public License, version 2.0 fail to comply with any of its terms. However, if You become compliant, then the rights granted under this License from a particular Contributor are reinstated (a) provisionally, unless and until such Contributor - explicitly and finally terminates Your grants, and (b) on an ongoing basis, - if such Contributor fails to notify You of the non-compliance by some - reasonable means prior to 60 days after You have come back into compliance. - Moreover, Your grants from a particular Contributor are reinstated on an - ongoing basis if such Contributor notifies You of the non-compliance by - some reasonable means, this is the first time You have received notice of - non-compliance with this License from such Contributor, and You become - compliant prior to 30 days after Your receipt of the notice. + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. 5.2. If You initiate litigation against any entity by asserting a patent - infringement claim (excluding declaratory judgment actions, counter-claims, - and cross-claims) alleging that a Contributor Version directly or - indirectly infringes any patent, then the rights granted to You by any and - all Contributors for the Covered Software under Section 2.1 of this License - shall terminate. + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. 5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user license agreements (excluding distributors and resellers) which have been @@ -24509,16 +24505,16 @@ Mozilla Public License, version 2.0 6. Disclaimer of Warranty - Covered Software is provided under this License on an “as is” basis, without - warranty of any kind, either expressed, implied, or statutory, including, - without limitation, warranties that the Covered Software is free of defects, - merchantable, fit for a particular purpose or non-infringing. The entire - risk as to the quality and performance of the Covered Software is with You. - Should any Covered Software prove defective in any respect, You (not any - Contributor) assume the cost of any necessary servicing, repair, or - correction. This disclaimer of warranty constitutes an essential part of this - License. No use of any Covered Software is authorized under this License - except under this disclaimer. + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. 
Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. 7. Limitation of Liability @@ -24530,27 +24526,29 @@ Mozilla Public License, version 2.0 goodwill, work stoppage, computer failure or malfunction, or any and all other commercial damages or losses, even if such party shall have been informed of the possibility of such damages. This limitation of liability - shall not apply to liability for death or personal injury resulting from such - party’s negligence to the extent applicable law prohibits such limitation. - Some jurisdictions do not allow the exclusion or limitation of incidental or - consequential damages, so this exclusion and limitation may not apply to You. + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. 8. Litigation - Any litigation relating to this License may be brought only in the courts of - a jurisdiction where the defendant maintains its principal place of business - and such litigation shall be governed by laws of that jurisdiction, without - reference to its conflict-of-law provisions. Nothing in this Section shall - prevent a party’s ability to bring cross-claims or counter-claims. + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. 9. Miscellaneous - This License represents the complete agreement concerning the subject matter - hereof. If any provision of this License is held to be unenforceable, such - provision shall be reformed only to the extent necessary to make it - enforceable. Any law or regulation which provides that the language of a - contract shall be construed against the drafter shall not be used to construe - this License against a Contributor. + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. 10. Versions of the License @@ -24564,23 +24562,24 @@ Mozilla Public License, version 2.0 10.2. Effect of New Versions - You may distribute the Covered Software under the terms of the version of - the License under which You originally received the Covered Software, or - under the terms of any subsequent version published by the license + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license steward. 10.3. 
Modified Versions If you create software not governed by this License, and you want to - create a new license for such software, you may create and use a modified - version of this License if you rename the license and remove any - references to the name of the license steward (except to note that such - modified license differs from this License). + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). -10.4. Distributing Source Code Form that is Incompatible With Secondary Licenses - If You choose to distribute Source Code Form that is Incompatible With - Secondary Licenses under the terms of this version of the License, the - notice described in Exhibit B of this License must be attached. +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. Exhibit A - Source Code Form License Notice @@ -24591,16 +24590,17 @@ Exhibit A - Source Code Form License Notice obtain one at http://mozilla.org/MPL/2.0/. -If it is not possible or desirable to put the notice in a particular file, then -You may include the notice in a location (such as a LICENSE file in a relevant -directory) where a recipient would be likely to look for such a notice. +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. You may add additional accurate notices of copyright ownership. -Exhibit B - “Incompatible With Secondary Licenses” Notice +Exhibit B - "Incompatible With Secondary Licenses" Notice - This Source Code Form is “Incompatible - With Secondary Licenses”, as defined by + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by the Mozilla Public License, v. 2.0. diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 8818b4634..07761fe34 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -26,9 +26,10 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/elastic/fleet-server/v7/internal/pkg/smap" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/miolini/datacounter" + "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" + "github.com/miolini/datacounter" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -67,6 +68,7 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro } type CheckinT struct { + verCon version.Constraints cfg *config.Server cache cache.Cache bc *BulkCheckin @@ -79,6 +81,7 @@ type CheckinT struct { } func NewCheckinT( + verCon version.Constraints, cfg *config.Server, c cache.Cache, bc *BulkCheckin, @@ -96,6 +99,7 @@ func NewCheckinT( Msg("Checkin install limits") ct := &CheckinT{ + verCon: verCon, cfg: cfg, cache: c, bc: bc, @@ -124,6 +128,11 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } + err = validateUserAgent(r, ct.verCon) + if err != nil { + return err + } + // Metrics; serenity now. 
dfunc := cntCheckin.IncStart() defer dfunc() diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 55ff4c2d1..06ff76d28 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -24,6 +24,7 @@ import ( "github.com/elastic/go-elasticsearch/v8" "github.com/gofrs/uuid" + "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" "github.com/miolini/datacounter" "github.com/rs/zerolog/log" @@ -41,18 +42,20 @@ var ( ) type EnrollerT struct { + verCon version.Constraints bulker bulk.Bulk cache cache.Cache limit *limit.Limiter } -func NewEnrollerT(cfg *config.Server, bulker bulk.Bulk, c cache.Cache) (*EnrollerT, error) { +func NewEnrollerT(verCon version.Constraints, cfg *config.Server, bulker bulk.Bulk, c cache.Cache) (*EnrollerT, error) { log.Info(). Interface("limits", cfg.Limits.EnrollLimit). Msg("Enroller install limits") return &EnrollerT{ + verCon: verCon, limit: limit.NewLimiter(&cfg.Limits.EnrollLimit), bulker: bulker, cache: c, @@ -113,6 +116,11 @@ func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { return nil, err } + err = validateUserAgent(r, et.verCon) + if err != nil { + return nil, err + } + // Metrics; serenity now. dfunc := cntEnroll.IncStart() defer dfunc() diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 1c03d26c9..2590b0334 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -31,6 +31,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/hashicorp/go-version" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -348,7 +349,8 @@ func (a *AgentMode) OnError(err error) { } type FleetServer struct { - version string + ver string + verCon version.Constraints policyId string cfg *config.Config @@ -358,9 +360,14 @@ type FleetServer struct { } // NewFleetServer creates the actual fleet server service. 
-func NewFleetServer(cfg *config.Config, c cache.Cache, version string, reporter status.Reporter) (*FleetServer, error) { +func NewFleetServer(cfg *config.Config, c cache.Cache, verStr string, reporter status.Reporter) (*FleetServer, error) { + verCon, err := buildVersionConstraint(verStr) + if err != nil { + return nil, err + } return &FleetServer{ - version: version, + ver: verStr, + verCon: verCon, cfg: cfg, cfgCh: make(chan *config.Config, 1), cache: c, @@ -513,7 +520,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er } g.Go(loggedRunFunc(ctx, "Policy index monitor", pim.Run)) - cord := coordinator.NewMonitor(cfg.Fleet, f.version, bulker, pim, coordinator.NewCoordinatorZero) + cord := coordinator.NewMonitor(cfg.Fleet, f.ver, bulker, pim, coordinator.NewCoordinatorZero) g.Go(loggedRunFunc(ctx, "Coordinator policy monitor", cord.Run)) // Policy monitor @@ -545,8 +552,8 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er bc := NewBulkCheckin(bulker) g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) - ct := NewCheckinT(&f.cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) - et, err := NewEnrollerT(&f.cfg.Inputs[0].Server, bulker, f.cache) + ct := NewCheckinT(f.verCon, &f.cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) + et, err := NewEnrollerT(f.verCon, &f.cfg.Inputs[0].Server, bulker, f.cache) if err != nil { return err } diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index 460f5e4b3..a4abb56fd 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -36,7 +36,7 @@ var ( func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { registry := monitoring.GetNamespace("info").GetRegistry() - monitoring.NewString(registry, "version").Set(f.version) + monitoring.NewString(registry, "version").Set(f.ver) monitoring.NewString(registry, "name").Set("fleet-server") metrics.SetupMetrics("fleet-server") diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 508d9757c..49bf4ba02 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -33,14 +33,15 @@ func TestRunServer(t *testing.T) { cfg.Host = "localhost" cfg.Port = port + verCon := mustBuildConstraints("8.0.0") c, err := cache.New(cache.Config{NumCounters: 100, MaxCost: 100000}) require.NoError(t, err) bulker := ftesting.MockBulk{} pim := mock.NewMockIndexMonitor() pm := policy.NewMonitor(bulker, pim, 5*time.Millisecond) bc := NewBulkCheckin(nil) - ct := NewCheckinT(cfg, c, bc, pm, nil, nil, nil, nil) - et, err := NewEnrollerT(cfg, nil, c) + ct := NewCheckinT(verCon, cfg, c, bc, pm, nil, nil, nil, nil) + et, err := NewEnrollerT(verCon, cfg, nil, c) require.NoError(t, err) router := NewRouter(bulker, ct, et, nil, nil, nil) diff --git a/cmd/fleet/userAgent.go b/cmd/fleet/userAgent.go new file mode 100644 index 000000000..1773be60f --- /dev/null +++ b/cmd/fleet/userAgent.go @@ -0,0 +1,78 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "errors" + "fmt" + "math" + "net/http" + "strconv" + "strings" + + "github.com/hashicorp/go-version" +) + +const ( + // MinVersion is the minimum version an Elastic Agent must be to communicate + MinVersion = "7.13" + + userAgentPrefix = "elastic agent " +) + +var ( + ErrInvalidUserAgent = errors.New("user-agent is invalid") + ErrUnsupportedVersion = errors.New("version is not supported") +) + +// buildVersionConstraint turns the version into a constraint to ensure that the connecting Elastic Agent's are +// a supported version. +func buildVersionConstraint(verStr string) (version.Constraints, error) { + ver, err := version.NewVersion(verStr) + if err != nil { + return nil, err + } + verStr = maximizePatch(ver) + return version.NewConstraint(fmt.Sprintf(">= %s, <= %s", MinVersion, verStr)) +} + +// maximizePatch turns the version into a string that has the patch value set to the maximum integer. +// +// Used to allow the Elastic Agent to be at a higher patch version than the Fleet Server, but require that the +// Elastic Agent is not higher in MAJOR or MINOR. +func maximizePatch(ver *version.Version) string { + segments := ver.Segments() + if len(segments) > 2 { + segments = segments[:2] + } + segments = append(segments, math.MaxInt32) + segStrs := make([]string, 0, len(segments)) + for _, segment := range segments { + segStrs = append(segStrs, strconv.Itoa(segment)) + } + return strings.Join(segStrs, ".") +} + +// validateUserAgent validates that the User-Agent of the connecting Elastic Agent is valid and that the version is +// supported for this Fleet Server. +func validateUserAgent(r *http.Request, verConst version.Constraints) error { + userAgent := r.Header.Get("User-Agent") + if userAgent == "" { + return ErrInvalidUserAgent + } + userAgent = strings.ToLower(userAgent) + if !strings.HasPrefix(userAgent, userAgentPrefix) { + return ErrInvalidUserAgent + } + verStr := strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(userAgent, userAgentPrefix), "-snapshot")) + ver, err := version.NewVersion(verStr) + if err != nil { + return ErrInvalidUserAgent + } + if !verConst.Check(ver) { + return ErrUnsupportedVersion + } + return nil +} diff --git a/cmd/fleet/userAgent_test.go b/cmd/fleet/userAgent_test.go new file mode 100644 index 000000000..e9c8d9926 --- /dev/null +++ b/cmd/fleet/userAgent_test.go @@ -0,0 +1,99 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package fleet + +import ( + "net/http/httptest" + "testing" + + "github.com/hashicorp/go-version" +) + +func TestValidateUserAgent(t *testing.T) { + tests := []struct { + userAgent string + verCon version.Constraints + err error + }{ + { + userAgent: "", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "bad value", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "eLaStIc AGeNt", + verCon: nil, + err: ErrInvalidUserAgent, + }, + { + userAgent: "eLaStIc AGeNt v7.10.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.11.1", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.12.5", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("7.13.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("7.13.1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.13.1", + verCon: mustBuildConstraints("7.13.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.14.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0", + verCon: mustBuildConstraints("7.13.0"), + err: ErrUnsupportedVersion, + }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + } + for _, tr := range tests { + t.Run(tr.userAgent, func(t *testing.T) { + req := httptest.NewRequest("GET", "/", nil) + req.Header.Set("User-Agent", tr.userAgent) + res := validateUserAgent(req, tr.verCon) + if tr.err != res { + t.Fatalf("err mismatch: %v != %v", tr.err, res) + } + }) + } +} + +func mustBuildConstraints(verStr string) version.Constraints { + con, err := buildVersionConstraint(verStr) + if err != nil { + panic(err) + } + return con +} diff --git a/go.mod b/go.mod index 745deb277..9245ee834 100644 --- a/go.mod +++ b/go.mod @@ -12,6 +12,7 @@ require ( github.com/gofrs/uuid v3.3.0+incompatible github.com/google/go-cmp v0.4.0 github.com/hashicorp/go-cleanhttp v0.5.1 + github.com/hashicorp/go-version v1.3.0 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 github.com/miolini/datacounter v1.0.2 diff --git a/go.sum b/go.sum index d6533a4b9..8ebda6804 100644 --- a/go.sum +++ b/go.sum @@ -449,8 +449,9 @@ github.com/hashicorp/go-retryablehttp v0.6.6 h1:HJunrbHTDDbBb/ay4kxa1n+dLmttUlnP github.com/hashicorp/go-retryablehttp v0.6.6/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-uuid v1.0.2 h1:cfejS+Tpcp13yd5nYHWDI6qVCny6wyX2Mt5SGur2IGE= github.com/hashicorp/go-uuid v1.0.2/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= -github.com/hashicorp/go-version v1.0.0 h1:21MVWPKDphxa7ineQQTrCU5brh7OuVVAzGOCnnCPtE8= github.com/hashicorp/go-version v1.0.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/go-version v1.3.0 h1:McDWVJIU/y+u1BRV06dPaLfLCaT7fUTJLp5r04x7iNw= +github.com/hashicorp/go-version v1.3.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d h1:Ft6PtvobE9vwkCsuoNO5DZDbhKkKuktAlSsiOi1X5NA= From 
52495c580591cc9cbcdf1b980c1f1b0bc94f180e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 15 Apr 2021 17:42:54 +0000 Subject: [PATCH 063/240] Utilize new ES fleet polling API for global checkpoint monitoring (#200) (#243) * Utilize fleet polling API for global checkpoint monitoring * Adjust for API changes, configurable poll timeout * Update retryDelay to 3 secs * Update tests for monitor API change * Adjust to the latest API changes * Remove fleet indexes bootstrapping for tests, it is done by fleet system index plugin now * Fix unit tests (cherry picked from commit 13a5550c51ea65b491be3e9df21f55dd6071a1bb) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 4 +- cmd/fleet/main.go | 20 ++- dev-tools/integration/main.go | 2 +- internal/pkg/bulk/bulk.go | 2 +- internal/pkg/config/config_test.go | 12 +- internal/pkg/config/monitor.go | 9 +- internal/pkg/config/output.go | 18 +- internal/pkg/config/output_test.go | 2 +- .../coordinator/monitor_integration_test.go | 2 +- internal/pkg/es/client.go | 4 +- internal/pkg/es/error.go | 5 + internal/pkg/es/fleet_global_checkpoints.go | 164 ++++++++++++++++++ internal/pkg/monitor/global_checkpoint.go | 94 +++++----- internal/pkg/monitor/mock/monitor.go | 9 +- internal/pkg/monitor/monitor.go | 150 ++++++++-------- .../pkg/monitor/monitor_integration_test.go | 6 +- internal/pkg/monitor/subscription_monitor.go | 10 +- .../subscription_monitor_integration_test.go | 4 +- .../pkg/policy/monitor_integration_test.go | 4 +- internal/pkg/policy/monitor_test.go | 2 + internal/pkg/sqn/sqn.go | 21 ++- internal/pkg/testing/esutil/bootstrap.go | 8 +- 22 files changed, 391 insertions(+), 161 deletions(-) create mode 100644 internal/pkg/es/fleet_global_checkpoints.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 07761fe34..d1a7e4dda 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -313,8 +313,8 @@ func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno sqn.SeqN now := time.Now().UTC().Format(time.RFC3339) return dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ - dl.FieldSeqNo: seqno.Get(0), - dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint(), + dl.FieldSeqNo: seqno.Value(), + dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint().Value(), dl.FieldExpiration: now, dl.FieldAgents: []string{agentId}, }) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 2590b0334..64d6ccc87 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -21,6 +21,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/coordinator" "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -505,7 +506,13 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er // shutdown before the bulker is then cancelled. 
bulkCtx, bulkCancel := context.WithCancel(context.Background()) defer bulkCancel() - es, bulker, err := bulk.InitES(bulkCtx, cfg) + esCli, bulker, err := bulk.InitES(bulkCtx, cfg) + if err != nil { + return err + } + + // Monitoring es client, longer timeout, no retries + monCli, err := es.NewClient(ctx, cfg, true) if err != nil { return err } @@ -514,7 +521,10 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er g, ctx := errgroup.WithContext(ctx) // Coordinator policy monitor - pim, err := monitor.New(dl.FleetPolicies, es, monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize)) + pim, err := monitor.New(dl.FleetPolicies, esCli, monCli, + monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize), + monitor.WithPollTimeout(cfg.Inputs[0].Monitor.PollTimeout), + ) if err != nil { return err } @@ -536,7 +546,11 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er var ad *action.Dispatcher var tr *action.TokenResolver - am, err = monitor.NewSimple(dl.FleetActions, es, monitor.WithExpiration(true), monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize)) + am, err = monitor.NewSimple(dl.FleetActions, esCli, monCli, + monitor.WithExpiration(true), + monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize), + monitor.WithPollTimeout(cfg.Inputs[0].Monitor.PollTimeout), + ) if err != nil { return err } diff --git a/dev-tools/integration/main.go b/dev-tools/integration/main.go index a4e329319..e43ed35d1 100644 --- a/dev-tools/integration/main.go +++ b/dev-tools/integration/main.go @@ -31,7 +31,7 @@ func main() { checkErr(err) ctx := context.Background() - es, err := es.NewClient(ctx, cfg) + es, err := es.NewClient(ctx, cfg, false) checkErr(err) err = esutil.EnsureESIndices(ctx, es) diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go index 2b98ef49f..f47063028 100644 --- a/internal/pkg/bulk/bulk.go +++ b/internal/pkg/bulk/bulk.go @@ -87,7 +87,7 @@ const ( func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) { - es, err := es.NewClient(ctx, cfg) + es, err := es.NewClient(ctx, cfg, false) if err != nil { return nil, nil, err } diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index f50a67c31..c7c00bad3 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -93,7 +93,8 @@ func TestConfig(t *testing.T) { MaxCost: defaultCacheMaxCost, }, Monitor: Monitor{ - FetchSize: defaultFetchSize, + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, @@ -182,7 +183,8 @@ func TestConfig(t *testing.T) { MaxCost: defaultCacheMaxCost, }, Monitor: Monitor{ - FetchSize: defaultFetchSize, + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, @@ -269,7 +271,8 @@ func TestConfig(t *testing.T) { MaxCost: defaultCacheMaxCost, }, Monitor: Monitor{ - FetchSize: defaultFetchSize, + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, @@ -356,7 +359,8 @@ func TestConfig(t *testing.T) { MaxCost: defaultCacheMaxCost, }, Monitor: Monitor{ - FetchSize: defaultFetchSize, + FetchSize: defaultFetchSize, + PollTimeout: defaultPollTimeout, }, }, }, diff --git a/internal/pkg/config/monitor.go b/internal/pkg/config/monitor.go index 1d3f8a31d..e88e8e09a 100644 --- a/internal/pkg/config/monitor.go +++ b/internal/pkg/config/monitor.go @@ -4,14 +4,19 @@ package config +import "time" + const ( - defaultFetchSize = 1000 + defaultFetchSize = 1000 + defaultPollTimeout = 5 * time.Minute ) type 
Monitor struct { - FetchSize int `config:"fetch_size"` + FetchSize int `config:"fetch_size"` + PollTimeout time.Duration `config:"poll_timeout"` } func (m *Monitor) InitDefaults() { m.FetchSize = defaultFetchSize + m.PollTimeout = defaultPollTimeout } diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index b800f4100..7e6e5205d 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -20,6 +20,10 @@ import ( "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) +// The timeout would be driven by the server for long poll. +// Giving it some sane long value. +const httpTransportLongPollTimeout = 10 * time.Minute + var hasScheme = regexp.MustCompile(`^([a-z][a-z0-9+\-.]*)://`) // Elasticsearch is the configuration for elasticsearch. @@ -77,7 +81,7 @@ func (c *Elasticsearch) Validate() error { } // ToESConfig converts the configuration object into the config for the elasticsearch client. -func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { +func (c *Elasticsearch) ToESConfig(longPoll bool) (elasticsearch.Config, error) { // build the addresses addrs := make([]string, len(c.Hosts)) for i, host := range c.Hosts { @@ -104,6 +108,17 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { ResponseHeaderTimeout: c.Timeout, ExpectContinueTimeout: 1 * time.Second, } + + disableRetry := false + + if longPoll { + httpTransport.IdleConnTimeout = httpTransportLongPollTimeout + httpTransport.ResponseHeaderTimeout = httpTransportLongPollTimeout + + // no retries for long poll monitoring + disableRetry = true + } + if c.TLS != nil && c.TLS.IsEnabled() { tls, err := tlscommon.LoadTLSConfig(c.TLS) if err != nil { @@ -136,6 +151,7 @@ func (c *Elasticsearch) ToESConfig() (elasticsearch.Config, error) { Header: h, Transport: httpTransport, MaxRetries: c.MaxRetries, + DisableRetry: disableRetry, }, nil } diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index dc29e2457..9f604df35 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -171,7 +171,7 @@ func TestToESConfig(t *testing.T) { cmpopts.IgnoreUnexported(tls.Config{}), } t.Run(name, func(t *testing.T) { - res, err := test.cfg.ToESConfig() + res, err := test.cfg.ToESConfig(false) require.NoError(t, err) test.result.Header.Set("X-elastic-product-origin", "fleet") if !assert.True(t, cmp.Equal(test.result, res, copts...)) { diff --git a/internal/pkg/coordinator/monitor_integration_test.go b/internal/pkg/coordinator/monitor_integration_test.go index 29375e6dd..d039a8abc 100644 --- a/internal/pkg/coordinator/monitor_integration_test.go +++ b/internal/pkg/coordinator/monitor_integration_test.go @@ -36,7 +36,7 @@ func TestMonitorLeadership(t *testing.T) { serversIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingServer) policiesIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicy) leadersIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicyLeader) - pim, err := monitor.New(policiesIndex, bulker.Client()) + pim, err := monitor.New(policiesIndex, bulker.Client(), bulker.Client()) if err != nil { t.Fatal(err) } diff --git a/internal/pkg/es/client.go b/internal/pkg/es/client.go index b2159fd96..792700ef9 100644 --- a/internal/pkg/es/client.go +++ b/internal/pkg/es/client.go @@ -15,8 +15,8 @@ import ( "github.com/rs/zerolog/log" ) -func NewClient(ctx context.Context, cfg *config.Config) (*elasticsearch.Client, error) { - escfg, err := 
cfg.Output.Elasticsearch.ToESConfig() +func NewClient(ctx context.Context, cfg *config.Config, longPoll bool) (*elasticsearch.Client, error) { + escfg, err := cfg.Output.Elasticsearch.ToESConfig(longPoll) if err != nil { return nil, err } diff --git a/internal/pkg/es/error.go b/internal/pkg/es/error.go index 4966c88f6..008097546 100644 --- a/internal/pkg/es/error.go +++ b/internal/pkg/es/error.go @@ -22,7 +22,10 @@ type ErrElastic struct { func (e *ErrElastic) Unwrap() error { if e.Type == "index_not_found_exception" { return ErrIndexNotFound + } else if e.Type == "timeout_exception" { + return ErrTimeout } + return nil } @@ -35,6 +38,8 @@ var ( ErrElasticNotFound = errors.New("elastic not found") ErrInvalidBody = errors.New("invalid body") ErrIndexNotFound = errors.New("index not found") + ErrTimeout = errors.New("timeout") + ErrNotFound = errors.New("not found") ) func TranslateError(status int, e ErrorT) error { diff --git a/internal/pkg/es/fleet_global_checkpoints.go b/internal/pkg/es/fleet_global_checkpoints.go new file mode 100644 index 000000000..449cc17e6 --- /dev/null +++ b/internal/pkg/es/fleet_global_checkpoints.go @@ -0,0 +1,164 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package es + +import ( + "context" + "net/http" + "strconv" + "strings" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/elastic/go-elasticsearch/v8/esapi" +) + +// The wrapper for the new _fleet global_checkpoints that is not the part of the +// standard client library at the moment. +// The shape mimics the official client API and should be easy drop-in replacement in the future. +// This should be replaced the official client library when/if the new API makes it in. + +func NewGlobalCheckpointsRequest(t esapi.Transport) GlobalCheckpoints { + return func(o ...func(*GlobalCheckpointsRequest)) (*esapi.Response, error) { + var r = GlobalCheckpointsRequest{} + for _, f := range o { + f(&r) + } + return r.Do(r.ctx, t) + } +} + +// Copied from the official client +func formatDuration(d time.Duration) string { + if d < time.Millisecond { + return strconv.FormatInt(int64(d), 10) + "nanos" + } + return strconv.FormatInt(int64(d)/int64(time.Millisecond), 10) + "ms" +} + +type GlobalCheckpoints func(o ...func(*GlobalCheckpointsRequest)) (*esapi.Response, error) + +// GlobalCheckpointsRequest configures the _fleet API global_checkpoints request. +// +type GlobalCheckpointsRequest struct { + ctx context.Context + + Index string + WaitForAdvance *bool + Checkpoints []int64 + Timeout time.Duration + + Header http.Header +} + +// Do executes the request and returns response or error. 
+// +func (r GlobalCheckpointsRequest) Do(ctx context.Context, transport esapi.Transport) (*esapi.Response, error) { + var ( + method string + path strings.Builder + params map[string]string + ) + + method = "GET" + + path.Grow(1 + len(r.Index) + len("/_fleet/global_checkpoints")) + if len(r.Index) > 0 { + path.WriteString("/") + path.WriteString(r.Index) + } + path.WriteString("/_fleet/global_checkpoints") + + params = make(map[string]string) + + if r.WaitForAdvance != nil { + params["wait_for_advance"] = strconv.FormatBool(*r.WaitForAdvance) + } + + if len(r.Checkpoints) > 0 { + seqNo := sqn.SeqNo(r.Checkpoints) + params["checkpoints"] = seqNo.String() + } + + if r.Timeout != 0 { + params["timeout"] = formatDuration(r.Timeout) + } + + req, err := http.NewRequest(method, path.String(), nil) + if err != nil { + return nil, err + } + + if len(params) > 0 { + q := req.URL.Query() + for k, v := range params { + q.Set(k, v) + } + req.URL.RawQuery = q.Encode() + } + + if len(r.Header) > 0 { + if len(req.Header) == 0 { + req.Header = r.Header + } else { + for k, vv := range r.Header { + for _, v := range vv { + req.Header.Add(k, v) + } + } + } + } + + if ctx != nil { + req = req.WithContext(ctx) + } + + res, err := transport.Perform(req) + if err != nil { + return nil, err + } + + response := esapi.Response{ + StatusCode: res.StatusCode, + Body: res.Body, + Header: res.Header, + } + + return &response, nil +} + +// WithContext sets the request context. +// +func (f GlobalCheckpoints) WithContext(v context.Context) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.ctx = v + } +} + +// WithIndex - an index name +// +func (f GlobalCheckpoints) WithIndex(index string) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Index = index + } +} + +func (f GlobalCheckpoints) WithWaitForAdvance(v bool) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.WaitForAdvance = &v + } +} + +func (f GlobalCheckpoints) WithCheckpoints(checkpoints []int64) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Checkpoints = checkpoints + } +} + +func (f GlobalCheckpoints) WithTimeout(to time.Duration) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.Timeout = to + } +} diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go index 89f0e8025..56bd598c8 100644 --- a/internal/pkg/monitor/global_checkpoint.go +++ b/internal/pkg/monitor/global_checkpoint.go @@ -8,73 +8,89 @@ import ( "context" "encoding/json" "errors" - "fmt" + "net/http" + "time" - "github.com/elastic/fleet-server/v7/internal/pkg/es" + esh "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v8/esapi" ) var ErrGlobalCheckpoint = errors.New("global checkpoint error") -type shard struct { - SeqNo struct { - GlobalCheckpoint int64 `json:"global_checkpoint"` - } `json:"seq_no"` -} +// Global checkpoint response +// {"global_checkpoints":[-1]} -type indexStats struct { - Shards map[string][]shard `json:"shards"` +type globalCheckpointsResponse struct { + GlobalCheckpoints []int64 `json:"global_checkpoints"` + TimedOut bool `json:"timed_out"` + Error esh.ErrorT `json:"error,omitempty"` } -type statsResponse struct { - IndexStats map[string]indexStats `json:"indices"` +func queryGlobalCheckpoint(ctx context.Context, 
es *elasticsearch.Client, index string) (seqno sqn.SeqNo, err error) { + req := esh.NewGlobalCheckpointsRequest(es.Transport) + res, err := req(req.WithContext(ctx), + req.WithIndex(index)) - Error es.ErrorT `json:"error,omitempty"` -} + if err != nil { + return + } + + seqno, err = processGlobalCheckpointResponse(res) + if errors.Is(err, esh.ErrIndexNotFound) { + seqno = sqn.DefaultSeqNo + err = nil + } -func queryGlobalCheckpoint(ctx context.Context, es *elasticsearch.Client, index string) (seqno int64, err error) { - seqno = defaultSeqNo + return seqno, err +} - res, err := es.Indices.Stats( - es.Indices.Stats.WithContext(ctx), - es.Indices.Stats.WithIndex(index), - es.Indices.Stats.WithLevel("shards"), +func waitCheckpointAdvance(ctx context.Context, es *elasticsearch.Client, index string, checkpoint sqn.SeqNo, to time.Duration) (seqno sqn.SeqNo, err error) { + req := esh.NewGlobalCheckpointsRequest(es.Transport) + res, err := req(req.WithContext(ctx), + req.WithIndex(index), + req.WithCheckpoints(checkpoint), + req.WithWaitForAdvance(true), + req.WithTimeout(to), ) if err != nil { return } + return processGlobalCheckpointResponse(res) +} + +func processGlobalCheckpointResponse(res *esapi.Response) (seqno sqn.SeqNo, err error) { defer res.Body.Close() - var sres statsResponse + // Don't parse the payload if timeout + if res.StatusCode == http.StatusGatewayTimeout { + return seqno, esh.ErrTimeout + } + + // Parse payload + var sres globalCheckpointsResponse err = json.NewDecoder(res.Body).Decode(&sres) if err != nil { return } - if len(sres.IndexStats) > 1 { - indices := make([]string, 0, len(sres.IndexStats)) - for k := range sres.IndexStats { - indices = append(indices, k) - } - return seqno, fmt.Errorf("more than one indices found %v, %w", indices, ErrGlobalCheckpoint) + // Check error + err = esh.TranslateError(res.StatusCode, sres.Error) + if err != nil { + return nil, err + } + + if sres.TimedOut { + return nil, esh.ErrTimeout } - if len(sres.IndexStats) > 0 { - // Grab the first and only index stats - var stats indexStats - for _, stats = range sres.IndexStats { - break - } - - if shards, ok := stats.Shards["0"]; ok { - if len(shards) > 0 { - seqno = shards[0].SeqNo.GlobalCheckpoint - } - } + if len(sres.GlobalCheckpoints) == 0 { + return nil, esh.ErrNotFound } - return + return sres.GlobalCheckpoints, nil } diff --git a/internal/pkg/monitor/mock/monitor.go b/internal/pkg/monitor/mock/monitor.go index 47368dcf7..25b268f07 100644 --- a/internal/pkg/monitor/mock/monitor.go +++ b/internal/pkg/monitor/mock/monitor.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" ) var gMockIndexCounter uint64 @@ -26,7 +27,7 @@ func (s *mockSubT) Output() <-chan []es.HitT { } type MockIndexMonitor struct { - checkpoint int64 + checkpoint sqn.SeqNo mut sync.RWMutex subs map[uint64]*mockSubT @@ -35,13 +36,13 @@ type MockIndexMonitor struct { // NewMockIndexMonitor returns a mock monitor. func NewMockIndexMonitor() *MockIndexMonitor { return &MockIndexMonitor{ - checkpoint: -1, + checkpoint: sqn.DefaultSeqNo, subs: make(map[uint64]*mockSubT), } } // GetCheckpoint returns the current checkpoint. 
-func (m *MockIndexMonitor) GetCheckpoint() int64 { +func (m *MockIndexMonitor) GetCheckpoint() sqn.SeqNo { return m.checkpoint } @@ -85,7 +86,7 @@ func (m *MockIndexMonitor) Notify(ctx context.Context, hits []es.HitT) { sz := len(hits) if sz > 0 { maxVal := hits[sz-1].SeqNo - m.checkpoint = maxVal + m.checkpoint = []int64{maxVal} m.mut.RLock() var wg sync.WaitGroup diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 8d4812bf8..2dd8ac1f3 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -9,12 +9,14 @@ import ( "context" "encoding/json" "errors" - "sync/atomic" + "sync" "time" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sleep" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/elastic/go-elasticsearch/v8" "github.com/rs/zerolog" @@ -22,7 +24,7 @@ import ( ) const ( - defaultCheckInterval = 1 * time.Second // check every second for the new action + defaultPollTimeout = 5 * time.Minute // default long poll timeout defaultSeqNo = int64(-1) // the _seq_no in elasticsearch start with 0 defaultWithExpiration = false @@ -34,7 +36,12 @@ const ( // One action can be split up into multiple documents up to the 1000 agents per action if needed. defaultFetchSize = 1000 - tightLoopCheckInterval = 10 * time.Millisecond // when we get a full page (fetchSize) of documents, use this interval to repeatedly poll for more records + // Retry delay on error waiting on the global checkpoint update. + // This is the wait time between requests to elastisearch in case if: + // 1. Index is not found (index is created only on the first document save) + // 2. Any other error waiting on global checkpoint, except timeouts. + // For the long poll timeout, start a new request as soon as possible. 
+ retryDelay = 3 * time.Second ) const ( @@ -63,7 +70,7 @@ type HitsT struct { } type GlobalCheckpointProvider interface { - GetCheckpoint() int64 + GetCheckpoint() sqn.SeqNo } // SimpleMonitor monitors for new documents in an index @@ -83,16 +90,18 @@ type SimpleMonitor interface { // simpleMonitorT monitors for new documents in an index type simpleMonitorT struct { - cli *elasticsearch.Client + esCli *elasticsearch.Client + monCli *elasticsearch.Client tmplCheck *dsl.Tmpl tmplQuery *dsl.Tmpl index string - checkInterval time.Duration + pollTimeout time.Duration withExpiration bool fetchSize int - checkpoint int64 // index global checkpoint + checkpoint sqn.SeqNo // index global checkpoint + mx sync.RWMutex // checkpoint mutex log zerolog.Logger @@ -105,14 +114,16 @@ type simpleMonitorT struct { type Option func(SimpleMonitor) // New creates new simple monitor -func NewSimple(index string, cli *elasticsearch.Client, opts ...Option) (SimpleMonitor, error) { +func NewSimple(index string, esCli, monCli *elasticsearch.Client, opts ...Option) (SimpleMonitor, error) { + m := &simpleMonitorT{ index: index, - cli: cli, - checkInterval: defaultCheckInterval, + esCli: esCli, + monCli: monCli, + pollTimeout: defaultPollTimeout, withExpiration: defaultWithExpiration, fetchSize: defaultFetchSize, - checkpoint: defaultSeqNo, + checkpoint: sqn.DefaultSeqNo, outCh: make(chan []es.HitT, 1), } @@ -146,10 +157,10 @@ func WithFetchSize(fetchSize int) Option { } } -// WithCheckInterval sets a periodic check interval -func WithCheckInterval(interval time.Duration) Option { +// WithPollTimeout sets the global checkpoint polling timeout +func WithPollTimeout(to time.Duration) Option { return func(m SimpleMonitor) { - m.(*simpleMonitorT).checkInterval = interval + m.(*simpleMonitorT).pollTimeout = to } } @@ -173,17 +184,21 @@ func (m *simpleMonitorT) Output() <-chan []es.HitT { } // GetCheckpoint implements GlobalCheckpointProvider interface -func (m *simpleMonitorT) GetCheckpoint() int64 { +func (m *simpleMonitorT) GetCheckpoint() sqn.SeqNo { return m.loadCheckpoint() } -func (m *simpleMonitorT) storeCheckpoint(val int64) { - m.log.Debug().Int64("checkpoint", val).Msg("updated checkpoint") - atomic.StoreInt64(&m.checkpoint, val) +func (m *simpleMonitorT) storeCheckpoint(val sqn.SeqNo) { + m.log.Debug().Ints64("checkpoints", val).Msg("updated checkpoint") + m.mx.Lock() + defer m.mx.Unlock() + m.checkpoint = val.Clone() } -func (m *simpleMonitorT) loadCheckpoint() int64 { - return atomic.LoadInt64(&m.checkpoint) +func (m *simpleMonitorT) loadCheckpoint() sqn.SeqNo { + m.mx.RLock() + defer m.mx.RUnlock() + return m.checkpoint.Clone() } // Run runs monitor. 
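// Aside (illustrative, not part of the committed diff): the hunks above guard
// the monitor checkpoint with an RWMutex and clone the sqn.SeqNo slice on every
// store and load, so callers never alias the shared array. A minimal,
// self-contained sketch of that copy-on-access pattern follows; the names
// (seqNo, checkpointHolder) are placeholders, not types from this repository.

package main

import (
	"fmt"
	"sync"
)

// seqNo stands in for a slice of sequence numbers.
type seqNo []int64

// clone copies the slice so readers and writers never share backing memory.
func (s seqNo) clone() seqNo {
	if s == nil {
		return nil
	}
	out := make(seqNo, len(s))
	copy(out, s)
	return out
}

// checkpointHolder keeps the current checkpoint behind an RWMutex and only
// hands out copies.
type checkpointHolder struct {
	mx  sync.RWMutex
	val seqNo
}

func (h *checkpointHolder) store(v seqNo) {
	h.mx.Lock()
	defer h.mx.Unlock()
	h.val = v.clone()
}

func (h *checkpointHolder) load() seqNo {
	h.mx.RLock()
	defer h.mx.RUnlock()
	return h.val.clone()
}

func main() {
	var h checkpointHolder
	h.store(seqNo{41})

	cp := h.load()
	cp[0] = 99 // mutating the returned copy leaves the stored checkpoint intact

	fmt.Println(h.load()) // prints [41]
}

// Cloning on both store and load costs one allocation per access, but it keeps
// the slice contents race-free, which a single atomic int64 could no longer
// express once the checkpoint became a list of sequence numbers.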
@@ -200,10 +215,10 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { }() // Initialize global checkpoint from the index stats - var checkpoint int64 - checkpoint, err = queryGlobalCheckpoint(ctx, m.cli, m.index) + var checkpoint sqn.SeqNo + checkpoint, err = queryGlobalCheckpoint(ctx, m.monCli, m.index) if err != nil { - m.log.Error().Err(err).Msg("failed to initialize the global checkpoint") + m.log.Error().Err(err).Msg("failed to initialize the global checkpoints") return err } m.storeCheckpoint(checkpoint) @@ -214,29 +229,40 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { m.readyCh = nil } - // Start timer loop to check for global checkpoint changes - t := time.NewTimer(m.checkInterval) - defer t.Stop() for { - select { - case <-t.C: - interval := m.checkInterval + checkpoint := m.loadCheckpoint() + + // Wait checkpoint advance + newCheckpoint, err := waitCheckpointAdvance(ctx, m.monCli, m.index, checkpoint, m.pollTimeout) + if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + // Wait until created + m.log.Info().Msgf("index not found, try again in %v", retryDelay) + } else if errors.Is(err, es.ErrTimeout) { + // Timed out, wait again + m.log.Debug().Msg("wait global checkpoints advance, timeout, wait again") + continue + } else { + // Log the error and keep trying + m.log.Error().Err(err).Msg("failed waiting global checkpoints advance") + } - hits, err := m.check(ctx) + // Delay next attempt + err = sleep.WithContext(ctx, retryDelay) if err != nil { - m.log.Error().Err(err).Msg("failed checking new documents") - } else { - count := m.notify(ctx, hits) + return err + } + } - // Change check interval if fetched the full page (m.fetchSize) of documents - if count == m.fetchSize { - m.log.Debug().Int("count", count).Dur("wait_next_check", interval).Msg("tight loop check") - interval = tightLoopCheckInterval - } + // Fetch up to known checkpoint + count := m.fetchSize + for count == m.fetchSize { + hits, err := m.fetch(ctx, newCheckpoint) + if err != nil { + m.log.Error().Err(err).Msg("failed checking new documents") + break } - t.Reset(interval) - case <-ctx.Done(): - return ctx.Err() + count = m.notify(ctx, hits) } } } @@ -247,7 +273,7 @@ func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { select { case m.outCh <- hits: maxVal := hits[sz-1].SeqNo - m.storeCheckpoint(maxVal) + m.storeCheckpoint([]int64{maxVal}) return sz case <-ctx.Done(): } @@ -255,45 +281,21 @@ func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { return 0 } -func (m *simpleMonitorT) check(ctx context.Context) ([]es.HitT, error) { +func (m *simpleMonitorT) fetch(ctx context.Context, maxCheckpoint sqn.SeqNo) ([]es.HitT, error) { now := time.Now().UTC().Format(time.RFC3339) checkpoint := m.loadCheckpoint() // Run check query that detects that there are new documents available params := map[string]interface{}{ - dl.FieldSeqNo: checkpoint, + dl.FieldSeqNo: checkpoint.Value(), + dl.FieldMaxSeqNo: maxCheckpoint.Value(), } if m.withExpiration { params[dl.FieldExpiration] = now } - hits, err := m.search(ctx, m.tmplCheck, params) - if err != nil { - return nil, err - } - - if len(hits) == 0 { - return nil, nil - } - - // New documents are detected, fetch global checkpoint - gcp, err := queryGlobalCheckpoint(ctx, m.cli, m.index) - if err != nil { - m.log.Error().Err(err).Msg("failed to check the global checkpoint") - return nil, err - } - - // If global check point is still not greater that the current known checkpoint, return nothing - if gcp 
<= checkpoint { - return nil, nil - } - - // Fetch documents capped by the global checkpoint - // Reusing params for the documents query - params[dl.FieldMaxSeqNo] = gcp - - hits, err = m.search(ctx, m.tmplQuery, params) + hits, err := m.search(ctx, m.tmplQuery, params) if err != nil { return nil, err } @@ -307,10 +309,10 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ return nil, err } - res, err := m.cli.Search( - m.cli.Search.WithContext(ctx), - m.cli.Search.WithIndex(m.index), - m.cli.Search.WithBody(bytes.NewBuffer(query)), + res, err := m.esCli.Search( + m.esCli.Search.WithContext(ctx), + m.esCli.Search.WithIndex(m.index), + m.esCli.Search.WithBody(bytes.NewBuffer(query)), ) if err != nil { return nil, err diff --git a/internal/pkg/monitor/monitor_integration_test.go b/internal/pkg/monitor/monitor_integration_test.go index eb1ebefb2..82cdd87cc 100644 --- a/internal/pkg/monitor/monitor_integration_test.go +++ b/internal/pkg/monitor/monitor_integration_test.go @@ -10,7 +10,6 @@ import ( "context" "sync" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -21,8 +20,6 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -const testMonitorIntervalMS = 100 - func setupIndex(ctx context.Context, t *testing.T) (string, bulk.Bulk) { index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingAction) return index, bulker @@ -46,8 +43,7 @@ func TestSimpleMonitorNonEmptyIndex(t *testing.T) { func runSimpleMonitorTest(t *testing.T, ctx context.Context, index string, bulker bulk.Bulk) { readyCh := make(chan error) - mon, err := NewSimple(index, bulker.Client(), - WithCheckInterval(testMonitorIntervalMS*time.Millisecond), + mon, err := NewSimple(index, bulker.Client(), bulker.Client(), WithReadyChan(readyCh), ) require.NoError(t, err) diff --git a/internal/pkg/monitor/subscription_monitor.go b/internal/pkg/monitor/subscription_monitor.go index 06907d5ec..f0880cebe 100644 --- a/internal/pkg/monitor/subscription_monitor.go +++ b/internal/pkg/monitor/subscription_monitor.go @@ -6,11 +6,13 @@ package monitor import ( "context" - "github.com/elastic/fleet-server/v7/internal/pkg/es" "sync" "sync/atomic" "time" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/elastic/go-elasticsearch/v8" "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" @@ -60,8 +62,8 @@ type monitorT struct { } // New creates new subscription monitor -func New(index string, cli *elasticsearch.Client, opts ...Option) (Monitor, error) { - sm, err := NewSimple(index, cli, opts...) +func New(index string, esCli, monCli *elasticsearch.Client, opts ...Option) (Monitor, error) { + sm, err := NewSimple(index, esCli, monCli, opts...) 
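// Aside (illustrative, not part of the committed diff): the Run() hunks above
// replace the fixed one-second timer with a long poll on the global checkpoints
// API: a timeout simply restarts the poll, "index not found" and other errors
// back off for retryDelay, and once the checkpoint advances the monitor drains
// full pages of hits. The sketch below mirrors only that control flow; the error
// values and helper signatures are placeholders, not this package's API.

package main

import (
	"context"
	"errors"
	"fmt"
	"time"
)

var (
	errTimeout       = errors.New("timeout")
	errIndexNotFound = errors.New("index not found")
)

const (
	retryDelay = 3 * time.Second
	fetchSize  = 1000
)

// poll blocks on the checkpoint wait, treats timeouts as "poll again",
// sleeps retryDelay on errors, and fetches until a short page comes back.
func poll(ctx context.Context, wait func(context.Context) (int64, error), fetch func(int64) (int, error)) error {
	for {
		checkpoint, err := wait(ctx)
		if err != nil {
			switch {
			case errors.Is(err, errTimeout):
				continue // long poll expired; start the next request immediately
			case errors.Is(err, errIndexNotFound):
				fmt.Println("index not found, retrying after delay")
			default:
				fmt.Println("wait failed, retrying after delay:", err)
			}
			select { // delay the next attempt, but honor cancellation
			case <-time.After(retryDelay):
				continue
			case <-ctx.Done():
				return ctx.Err()
			}
		}

		// Keep fetching while full pages (fetchSize hits) come back.
		for n := fetchSize; n == fetchSize; {
			n, err = fetch(checkpoint)
			if err != nil {
				fmt.Println("fetch failed:", err)
				break
			}
		}
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 50*time.Millisecond)
	defer cancel()
	_ = poll(ctx,
		func(context.Context) (int64, error) { return 0, errIndexNotFound },
		func(int64) (int, error) { return 0, nil },
	)
}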
if err != nil { return nil, err } @@ -75,7 +77,7 @@ func New(index string, cli *elasticsearch.Client, opts ...Option) (Monitor, erro return m, nil } -func (m *monitorT) GetCheckpoint() int64 { +func (m *monitorT) GetCheckpoint() sqn.SeqNo { return m.sm.GetCheckpoint() } diff --git a/internal/pkg/monitor/subscription_monitor_integration_test.go b/internal/pkg/monitor/subscription_monitor_integration_test.go index d24a894cf..22d226a33 100644 --- a/internal/pkg/monitor/subscription_monitor_integration_test.go +++ b/internal/pkg/monitor/subscription_monitor_integration_test.go @@ -10,7 +10,6 @@ import ( "context" "sync" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/stretchr/testify/require" @@ -38,8 +37,7 @@ func TestMonitorNonEmptyIndex(t *testing.T) { func runMonitorTest(t *testing.T, ctx context.Context, index string, bulker bulk.Bulk) { readyCh := make(chan error) - mon, err := New(index, bulker.Client(), - WithCheckInterval(testMonitorIntervalMS*time.Millisecond), + mon, err := New(index, bulker.Client(), bulker.Client(), WithReadyChan(readyCh), ) require.NoError(t, err) diff --git a/internal/pkg/policy/monitor_integration_test.go b/internal/pkg/policy/monitor_integration_test.go index 5983b8ab3..6bbb83b7f 100644 --- a/internal/pkg/policy/monitor_integration_test.go +++ b/internal/pkg/policy/monitor_integration_test.go @@ -22,8 +22,6 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) -const testMonitorIntervalMS = 100 - func setupIndex(ctx context.Context, t *testing.T) (string, bulk.Bulk) { index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingPolicy) return index, bulker @@ -34,7 +32,7 @@ func TestMonitor_Integration(t *testing.T) { defer cancel() index, bulker := setupIndex(ctx, t) - im, err := monitor.New(index, bulker.Client(), monitor.WithCheckInterval(testMonitorIntervalMS)) + im, err := monitor.New(index, bulker.Client(), bulker.Client()) if err != nil { t.Fatal(err) } diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index b67bd45e4..7057aaf8d 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -160,6 +160,7 @@ func TestMonitor_SamePolicy(t *testing.T) { gotPolicy := false tm := time.NewTimer(1 * time.Second) + defer tm.Stop() select { case <-s.Output(): gotPolicy = true @@ -233,6 +234,7 @@ func TestMonitor_NewPolicyUncoordinated(t *testing.T) { gotPolicy := false tm := time.NewTimer(1 * time.Second) + defer tm.Stop() select { case <-s.Output(): gotPolicy = true diff --git a/internal/pkg/sqn/sqn.go b/internal/pkg/sqn/sqn.go index a2d2def60..d9832fe4f 100644 --- a/internal/pkg/sqn/sqn.go +++ b/internal/pkg/sqn/sqn.go @@ -11,6 +11,8 @@ import ( const UndefinedSeqNo = -1 +var DefaultSeqNo = []int64{UndefinedSeqNo} + // Abstracts the array of document seq numbers type SeqNo []int64 @@ -25,9 +27,20 @@ func (s SeqNo) IsSet() bool { return len(s) > 0 && s[0] >= 0 } -func (s SeqNo) Get(idx int) int64 { - if idx < len(s) { - return s[idx] +// Returns one/first value until we get and API to get the next checkpoints on search +func (s SeqNo) Value() int64 { + if len(s) == 0 { + return UndefinedSeqNo + } + return s[0] +} + +func (s SeqNo) Clone() SeqNo { + if s == nil { + return nil } - return UndefinedSeqNo + + r := make(SeqNo, len(s)) + copy(r, s) + return r } diff --git a/internal/pkg/testing/esutil/bootstrap.go b/internal/pkg/testing/esutil/bootstrap.go index 535f201d7..f8242971b 100644 --- a/internal/pkg/testing/esutil/bootstrap.go +++ 
b/internal/pkg/testing/esutil/bootstrap.go @@ -22,13 +22,7 @@ type indexConfig struct { var indexConfigs = map[string]indexConfig{ // Commenting out the boostrapping for now here, just in case if it needs to be "enabled" again. // Will remove all the boostrapping code completely later once all is fully integrated - ".fleet-actions": {mapping: es.MappingAction}, - ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, - ".fleet-agents": {mapping: es.MappingAgent}, - ".fleet-enrollment-api-keys": {mapping: es.MappingEnrollmentApiKey}, - ".fleet-policies": {mapping: es.MappingPolicy}, - ".fleet-policies-leader": {mapping: es.MappingPolicyLeader}, - ".fleet-servers": {mapping: es.MappingServer}, + ".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true}, } // Bootstrap creates .fleet-actions data stream From 917ffbeb9863abda691e4193361f758464297201 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 19 Apr 2021 12:13:55 +0000 Subject: [PATCH 064/240] Update fleet server monitor logging (#247) (#248) (cherry picked from commit cb8aa6c08b238632d09241327fc164ae9bff3f69) Co-authored-by: Aleksandr Maus --- internal/pkg/monitor/monitor.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 2dd8ac1f3..e109f83ee 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -237,14 +237,14 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { if err != nil { if errors.Is(err, es.ErrIndexNotFound) { // Wait until created - m.log.Info().Msgf("index not found, try again in %v", retryDelay) + m.log.Debug().Msgf("index not found, poll again in %v", retryDelay) } else if errors.Is(err, es.ErrTimeout) { // Timed out, wait again - m.log.Debug().Msg("wait global checkpoints advance, timeout, wait again") + m.log.Debug().Msg("timeout on global checkpoints advance, poll again") continue } else { // Log the error and keep trying - m.log.Error().Err(err).Msg("failed waiting global checkpoints advance") + m.log.Error().Err(err).Msg("failed on waiting for global checkpoints advance") } // Delay next attempt From f69cbc321d2e4ca8679b89c3ff53acf7466e77b0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 19 Apr 2021 18:08:41 +0000 Subject: [PATCH 065/240] Validate Elasticsearch version for compatibility (#249) (#250) (cherry picked from commit df6eb877c04ac6d44fc304ebd2fb12d6193d0ea1) Co-authored-by: Aleksandr Maus --- cmd/fleet/main.go | 7 +++ internal/pkg/es/info.go | 50 ++++++++++++++++++ internal/pkg/ver/check.go | 87 ++++++++++++++++++++++++++++++++ internal/pkg/ver/check_test.go | 92 ++++++++++++++++++++++++++++++++++ 4 files changed, 236 insertions(+) create mode 100644 internal/pkg/es/info.go create mode 100644 internal/pkg/ver/check.go create mode 100644 internal/pkg/ver/check_test.go diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 64d6ccc87..1ae820305 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -29,6 +29,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/reload" "github.com/elastic/fleet-server/v7/internal/pkg/signal" "github.com/elastic/fleet-server/v7/internal/pkg/status" + "github.com/elastic/fleet-server/v7/internal/pkg/ver" "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" @@ -511,6 +512,12 @@ func (f *FleetServer) 
runServer(ctx context.Context, cfg *config.Config) (err er return err } + // Check version compatibility with Elasticsearch + err = ver.CheckCompatibility(ctx, esCli, f.ver) + if err != nil { + return fmt.Errorf("failed version compatibility check with elasticsearch: %w", err) + } + // Monitoring es client, longer timeout, no retries monCli, err := es.NewClient(ctx, cfg, true) if err != nil { diff --git a/internal/pkg/es/info.go b/internal/pkg/es/info.go new file mode 100644 index 000000000..46fe4df21 --- /dev/null +++ b/internal/pkg/es/info.go @@ -0,0 +1,50 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package es + +import ( + "context" + "encoding/json" + "strings" + + "github.com/elastic/go-elasticsearch/v8" +) + +type versionInfo struct { + Number string `json:"number"` +} + +type infoResponse struct { + Version versionInfo `json:"version"` + Error ErrorT `json:"error,omitempty"` +} + +func FetchESVersion(ctx context.Context, esCli *elasticsearch.Client) (version string, err error) { + res, err := esCli.Info( + esCli.Info.WithContext(ctx), + ) + + if err != nil { + return + } + defer res.Body.Close() + + var sres infoResponse + + err = json.NewDecoder(res.Body).Decode(&sres) + if err != nil { + return + } + + // Check error + err = TranslateError(res.StatusCode, sres.Error) + if err != nil { + return + } + + verStr := strings.TrimSpace(strings.TrimSuffix(strings.ToLower(sres.Version.Number), "-snapshot")) + + return verStr, nil +} diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go new file mode 100644 index 000000000..10e8f85b2 --- /dev/null +++ b/internal/pkg/ver/check.go @@ -0,0 +1,87 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package ver + +import ( + "context" + "errors" + "fmt" + "strconv" + "strings" + + esh "github.com/elastic/fleet-server/v7/internal/pkg/es" + + "github.com/elastic/go-elasticsearch/v8" + "github.com/hashicorp/go-version" + "github.com/rs/zerolog/log" +) + +var ( + ErrUnsupportedVersion = errors.New("unsupported version") + ErrMalformedVersion = errors.New("malformed version") +) + +func CheckCompatibility(ctx context.Context, esCli *elasticsearch.Client, fleetVersion string) error { + log.Debug().Str("fleet_version", fleetVersion).Msg("check version compatibility with elasticsearch") + + esVersion, err := esh.FetchESVersion(ctx, esCli) + + if err != nil { + log.Error().Err(err).Msg("failed to fetch elasticsearch version") + return err + } + log.Debug().Str("elasticsearch_version", esVersion).Msg("fetched elasticsearch version") + + return checkCompatibility(fleetVersion, esVersion) +} + +func checkCompatibility(fleetVersion, esVersion string) error { + verConst, err := buildVersionConstraint(fleetVersion) + if err != nil { + log.Error().Err(err).Str("fleet_version", fleetVersion).Msg("failed to build constraint") + return err + } + + ver, err := parseVersion(esVersion) + if err != nil { + return err + } + + if !verConst.Check(ver) { + log.Error().Err(ErrUnsupportedVersion).Msg("failed elasticsearch version check") + return ErrUnsupportedVersion + } + log.Info().Str("fleet_version", fleetVersion).Str("elasticsearch_version", esVersion).Msg("versions are compatible") + return nil +} + +func buildVersionConstraint(fleetVersion string) (version.Constraints, error) { + ver, err := parseVersion(fleetVersion) + if err != nil { + return nil, err + } + return version.NewConstraint(fmt.Sprintf(">= %s", minimizePatch(ver))) +} + +func minimizePatch(ver *version.Version) string { + segments := ver.Segments() + if len(segments) > 2 { + segments = segments[:2] + } + segments = append(segments, 0) + segStrs := make([]string, 0, len(segments)) + for _, segment := range segments { + segStrs = append(segStrs, strconv.Itoa(segment)) + } + return strings.Join(segStrs, ".") +} + +func parseVersion(sver string) (*version.Version, error) { + ver, err := version.NewVersion(sver) + if err != nil { + return nil, fmt.Errorf("%v: %w", err, ErrMalformedVersion) + } + return ver, nil +} diff --git a/internal/pkg/ver/check_test.go b/internal/pkg/ver/check_test.go new file mode 100644 index 000000000..03f49fc7a --- /dev/null +++ b/internal/pkg/ver/check_test.go @@ -0,0 +1,92 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
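// Aside (illustrative, not part of the committed diff): checkCompatibility above
// reduces the Fleet Server version to a ">= major.minor.0" constraint and checks
// the Elasticsearch version against it using hashicorp/go-version. The standalone
// snippet below shows that comparison for a Fleet Server at 7.13.2 (constraint
// ">= 7.13.0"); the sample versions line up with the test table that follows.

package main

import (
	"fmt"

	"github.com/hashicorp/go-version"
)

func main() {
	// Fleet Server 7.13.2 is minimized to the constraint ">= 7.13.0", so any
	// Elasticsearch at or above 7.13.0, including newer minors and majors,
	// passes the check, while older releases are rejected.
	constraint, err := version.NewConstraint(">= 7.13.0")
	if err != nil {
		panic(err)
	}

	for _, es := range []string{"7.13.1", "7.14.2", "8.0.0", "7.12.1"} {
		v, err := version.NewVersion(es)
		if err != nil {
			panic(err)
		}
		fmt.Printf("elasticsearch %-7s compatible=%v\n", es, constraint.Check(v))
	}
	// elasticsearch 7.13.1  compatible=true
	// elasticsearch 7.14.2  compatible=true
	// elasticsearch 8.0.0   compatible=true
	// elasticsearch 7.12.1  compatible=false
}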
+ +package ver + +import ( + "errors" + "testing" +) + +func TestCheckCompatibilityInternal(t *testing.T) { + tests := []struct { + name string + fleetVersion string + esVersion string + err error + }{ + { + name: "empty fleet and elasticsearch version", + fleetVersion: "", + esVersion: "", + err: ErrMalformedVersion, + }, + { + name: "empty fleet version", + fleetVersion: "", + esVersion: "8.0.0", + err: ErrMalformedVersion, + }, + { + name: "empty elasticsearch version", + fleetVersion: "7.13", + esVersion: "", + err: ErrMalformedVersion, + }, + { + name: "supported elasticsearch 713-713", + fleetVersion: "7.13.0", + esVersion: "7.13.0", + err: nil, + }, + { + name: "supported elasticsearch 7131-7132", + fleetVersion: "7.13.2", + esVersion: "7.13.1", + err: nil, + }, + { + name: "supported elasticsearch 713-714", + fleetVersion: "7.13.2", + esVersion: "7.14.2", + err: nil, + }, + { + name: "supported elasticsearch 715-800", + fleetVersion: "7.15.2", + esVersion: "8.0.0", + err: nil, + }, + { + name: "unsupported elasticsearch 714-713", + fleetVersion: "7.14.0", + esVersion: "7.13.1", + err: ErrUnsupportedVersion, + }, + { + name: "unsupported elasticsearch 800-718", + fleetVersion: "8.0.0", + esVersion: "7.18.0", + err: ErrUnsupportedVersion, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + err := checkCompatibility(tc.fleetVersion, tc.esVersion) + if tc.err != nil { + if err == nil { + t.Error("expected error") + } else { + if !errors.Is(err, tc.err) { + t.Errorf("unexpected error kind: %v", err) + } + } + } else { + if err != nil { + t.Error("unexpected error") + } + } + }) + } +} From f407332ade0cfd78651cab2df0bcb8354580eea3 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 20 Apr 2021 17:52:57 +0000 Subject: [PATCH 066/240] Add script to avoid race condition on updating policy metadata in agent record (#257) (cherry picked from commit 36455b5e34a28e962459cfe95419be681139a776) Co-authored-by: Sean Cunningham --- cmd/fleet/handleAck.go | 117 ++++++++++++++++++++---------------- cmd/fleet/handleAck_test.go | 39 ++++++++++++ internal/pkg/bulk/bulk.go | 23 ++++++- internal/pkg/bulk/multi.go | 2 +- internal/pkg/bulk/opt.go | 9 ++- 5 files changed, 134 insertions(+), 56 deletions(-) create mode 100644 cmd/fleet/handleAck_test.go diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index ba399d952..608c91b0e 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -5,11 +5,13 @@ package fleet import ( + "bytes" "context" "encoding/json" "errors" "io/ioutil" "net/http" + "strconv" "strings" "time" @@ -199,34 +201,26 @@ func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, act } } - if found { - updates := make([]bulk.BulkOp, 0, 1) - fields := map[string]interface{}{ - dl.FieldPolicyRevisionIdx: currRev, - dl.FieldPolicyCoordinatorIdx: currCoord, - } - fields[dl.FieldUpdatedAt] = time.Now().UTC().Format(time.RFC3339) - - source, err := json.Marshal(map[string]interface{}{ - "doc": fields, - }) - if err != nil { - return err - } - - updates = append(updates, bulk.BulkOp{ - Id: agent.Id, - Body: source, - Index: dl.FleetAgents, - }) - - err = ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) - if err != nil { - return err - } + if !found { + return nil } - return nil + body := makeUpdatePolicyBody( + agent.PolicyId, + currRev, + currCoord, + ) + + err := ack.bulk.Update( + ctx, + dl.FleetAgents, + agent.Id, + body, + bulk.WithRefresh(), + bulk.WithRetryOnConflict(3), + 
) + + return err } func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { @@ -237,52 +231,35 @@ func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { } } - updates := make([]bulk.BulkOp, 0, 1) now := time.Now().UTC().Format(time.RFC3339) - fields := map[string]interface{}{ + doc := bulk.UpdateFields{ dl.FieldActive: false, dl.FieldUnenrolledAt: now, dl.FieldUpdatedAt: now, } - source, err := json.Marshal(map[string]interface{}{ - "doc": fields, - }) + body, err := doc.Marshal() if err != nil { return err } - updates = append(updates, bulk.BulkOp{ - Id: agent.Id, - Body: source, - Index: dl.FleetAgents, - }) - - return ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) + return ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) } func (ack *AckT) handleUpgrade(ctx context.Context, agent *model.Agent) error { - updates := make([]bulk.BulkOp, 0, 1) + now := time.Now().UTC().Format(time.RFC3339) - fields := map[string]interface{}{ - dl.FieldUpgradedAt: now, + doc := bulk.UpdateFields{ dl.FieldUpgradeStartedAt: nil, + dl.FieldUpgradedAt: now, } - source, err := json.Marshal(map[string]interface{}{ - "doc": fields, - }) + body, err := doc.Marshal() if err != nil { return err } - updates = append(updates, bulk.BulkOp{ - Id: agent.Id, - Body: source, - Index: dl.FleetAgents, - }) - - return ack.bulk.MUpdate(ctx, updates, bulk.WithRefresh()) + return ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) } func _getAPIKeyIDs(agent *model.Agent) []string { @@ -295,3 +272,41 @@ func _getAPIKeyIDs(agent *model.Agent) []string { } return keys } + +// Generate an update script that validates that the policy_id +// has not changed underneath us by an upstream process (Kibana or otherwise). +// We have a race condition where a user could have assigned a new policy to +// an agent while we were busy updating the old one. A blind update to the +// agent record without a check could set the revision and coordIdx for the wrong +// policy. This script should be coupled with a "retry_on_conflict" parameter +// to allow for *other* changes to the agent record while we running the script. +// (For example, say the background bulk check-in timestamp update task fires) +// +// WARNING: This assumes the input data is sanitized. + +const kUpdatePolicyPrefix = `{"script":{"lang":"painless","source":"if (ctx._source.policy_id == params.id) {ctx._source.` + + dl.FieldPolicyRevisionIdx + + ` = params.rev;ctx._source.` + + dl.FieldPolicyCoordinatorIdx + + `= params.coord;ctx._source.` + + dl.FieldUpdatedAt + + ` = params.ts;} else {ctx.op = \"noop\";}","params": {"id":"` + +func makeUpdatePolicyBody(policyId string, newRev, coordIdx int64) []byte { + + var buf bytes.Buffer + buf.Grow(384) + + // Not pretty, but fast. + buf.WriteString(kUpdatePolicyPrefix) + buf.WriteString(policyId) + buf.WriteString(`","rev":`) + buf.WriteString(strconv.FormatInt(newRev, 10)) + buf.WriteString(`,"coord":`) + buf.WriteString(strconv.FormatInt(coordIdx, 10)) + buf.WriteString(`,"ts":"`) + buf.WriteString(time.Now().UTC().Format(time.RFC3339)) + buf.WriteString(`"}}}`) + + return buf.Bytes() +} diff --git a/cmd/fleet/handleAck_test.go b/cmd/fleet/handleAck_test.go new file mode 100644 index 000000000..ce6135406 --- /dev/null +++ b/cmd/fleet/handleAck_test.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "testing" + + "encoding/json" +) + +func BenchmarkMakeUpdatePolicyBody(b *testing.B) { + b.ReportAllocs() + + const policyId = "ed110be4-c2a0-42b8-adc0-94c2f0569207" + const newRev = 2 + const coord = 1 + + for n := 0; n < b.N; n++ { + makeUpdatePolicyBody(policyId, newRev, coord) + } +} + +func TestMakeUpdatePolicyBody(t *testing.T) { + + const policyId = "ed110be4-c2a0-42b8-adc0-94c2f0569207" + const newRev = 2 + const coord = 1 + + data := makeUpdatePolicyBody(policyId, newRev, coord) + + var i interface{} + err := json.Unmarshal(data, &i) + + if err != nil { + t.Fatal(err) + } +} diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go index f47063028..de563e18c 100644 --- a/internal/pkg/bulk/bulk.go +++ b/internal/pkg/bulk/bulk.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "strconv" "time" "github.com/elastic/fleet-server/v7/internal/pkg/config" @@ -579,7 +580,7 @@ func (b *Bulker) waitBulkAction(ctx context.Context, action Action, index, id st const kSlop = 64 buf.Grow(len(body) + kSlop) - if err := b.writeBulkMeta(&buf, action, index, id); err != nil { + if err := b.writeBulkMeta(&buf, action, index, id, opt); err != nil { return nil, err } @@ -722,7 +723,7 @@ func (b *Bulker) writeMget(buf *bytes.Buffer, index, id string) error { return nil } -func (b *Bulker) writeBulkMeta(buf *bytes.Buffer, action Action, index, id string) error { +func (b *Bulker) writeBulkMeta(buf *bytes.Buffer, action Action, index, id string, opts optionsT) error { if err := b.validateMeta(index, id); err != nil { return err } @@ -735,7 +736,11 @@ func (b *Bulker) writeBulkMeta(buf *bytes.Buffer, action Action, index, id strin buf.WriteString(id) buf.WriteString(`",`) } - + if opts.RetryOnConflict > 0 { + buf.WriteString(`"retry_on_conflict":`) + buf.WriteString(strconv.Itoa(opts.RetryOnConflict)) + buf.WriteString(`,`) + } buf.WriteString(`"_index":"`) buf.WriteString(index) buf.WriteString("\"}}\n") @@ -803,3 +808,15 @@ func (b *Bulker) dispatch(ctx context.Context, action Action, opts optionsT, dat return respT{err: ctx.Err()} } + +type UpdateFields map[string]interface{} + +func (u UpdateFields) Marshal() ([]byte, error) { + doc := struct { + Doc map[string]interface{} `json:"doc"` + }{ + u, + } + + return json.Marshal(doc) +} diff --git a/internal/pkg/bulk/multi.go b/internal/pkg/bulk/multi.go index c484f1ba9..704f4ea4d 100644 --- a/internal/pkg/bulk/multi.go +++ b/internal/pkg/bulk/multi.go @@ -28,7 +28,7 @@ func (b *Bulker) multiWaitBulkAction(ctx context.Context, action Action, ops []B var buf bytes.Buffer buf.Grow(len(op.Body) + kSlop) - if err := b.writeBulkMeta(&buf, action, op.Index, op.Id); err != nil { + if err := b.writeBulkMeta(&buf, action, op.Index, op.Id, opt); err != nil { return nil, err } diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index b73aae6a1..c3aa367d7 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -12,7 +12,8 @@ import ( // Transaction options type optionsT struct { - Refresh bool + Refresh bool + RetryOnConflict int } type Opt func(*optionsT) @@ -23,6 +24,12 @@ func WithRefresh() Opt { } } +func WithRetryOnConflict(n int) Opt { + return func(opt *optionsT) { + opt.RetryOnConflict = n + } +} + //----- // Bulk API options From eac4e29e047d779aee4cd142ef840e25a4ca39a2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: 
Tue, 20 Apr 2021 18:03:40 +0000 Subject: [PATCH 067/240] Move runtime configuration so that can be configured via agent. (#258) (cherry picked from commit 47e9560283c789c8fd879dd2b459641479d339db) Co-authored-by: Sean Cunningham --- cmd/fleet/main.go | 7 ++++--- example/fleet-server-100.yml | 8 ++++---- fleet-server.yml | 3 --- internal/pkg/config/config.go | 2 -- internal/pkg/config/input.go | 2 ++ 5 files changed, 10 insertions(+), 12 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 1ae820305..cc5d1c56e 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -480,12 +480,13 @@ func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error } func initRuntime(cfg *config.Config) { - if cfg.Runtime.GCPercent != 0 { - old := debug.SetGCPercent(cfg.Runtime.GCPercent) + gcPercent := cfg.Inputs[0].Server.Runtime.GCPercent + if gcPercent != 0 { + old := debug.SetGCPercent(gcPercent) log.Info(). Int("old", old). - Int("new", cfg.Runtime.GCPercent). + Int("new", gcPercent). Msg("SetGCPercent") } } diff --git a/example/fleet-server-100.yml b/example/fleet-server-100.yml index d8f8e87ea..3a8add744 100644 --- a/example/fleet-server-100.yml +++ b/example/fleet-server-100.yml @@ -13,9 +13,6 @@ fleet: agent: id: 1e4954ce-af37-4731-9f4a-407b08e69e42 # Normally provided by the agent; stubbed here. -runtime: - gc_percent: 20 # Force the GC to execute more frequently: see https://golang.org/pkg/runtime/debug/#SetGCPercent - inputs: - cache: num_counters: 2000 # Limit the size of the hash table to rougly 10x expected number of elements @@ -43,7 +40,10 @@ inputs: ssl: enabled: true key: /path/to/key.pem # To support TLS, server needs cert, key pair - certificate: /path/to/cert.pem + certificate: /path/to/cert.pem + runtime: + gc_percent: 20 # Force the GC to execute more frequently: see https://golang.org/pkg/runtime/debug/#SetGCPercent + http: enabled: true # Enable metrics on http://localhost:5066/stats \ No newline at end of file diff --git a/fleet-server.yml b/fleet-server.yml index 4e2ed9618..774009c67 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -11,9 +11,6 @@ fleet: logging: level: '${LOG_LEVEL:DEBUG}' -runtime: - gc_percent: 100 # Overide the golang GC target percentage (see https://golang.org/pkg/runtime/debug/#SetGCPercent) - # Input config provided by the Elastic Agent for the server #inputs: # - type: fleet-server diff --git a/internal/pkg/config/config.go b/internal/pkg/config/config.go index c0d2d3af3..2f636792b 100644 --- a/internal/pkg/config/config.go +++ b/internal/pkg/config/config.go @@ -27,7 +27,6 @@ type Config struct { Inputs []Input `config:"inputs"` Logging Logging `config:"logging"` HTTP HTTP `config:"http"` - Runtime Runtime `config:"runtime"` } // InitDefaults initializes the defaults for the configuration. @@ -35,7 +34,6 @@ func (c *Config) InitDefaults() { c.Inputs = make([]Input, 1) c.Inputs[0].InitDefaults() c.HTTP.InitDefaults() - c.Runtime.InitDefaults() } // Validate ensures that the configuration is valid. diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index ec804a9e5..93d5bfd87 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -65,6 +65,7 @@ type Server struct { CompressionLevel int `config:"compression_level"` CompressionThresh int `config:"compression_threshold"` Limits ServerLimits `config:"limits"` + Runtime Runtime `config:"runtime"` } // InitDefaults initializes the defaults for the configuration. 
@@ -76,6 +77,7 @@ func (c *Server) InitDefaults() { c.CompressionThresh = 1024 c.Profiler.InitDefaults() c.Limits.InitDefaults() + c.Runtime.InitDefaults() } // BindAddress returns the binding address for the HTTP server. From 45e3923ac37cbd5dafc469a5871270fdb1c9e46b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 21 Apr 2021 02:48:02 +0000 Subject: [PATCH 068/240] Wait on index when polling global checkpoints (#254) (#259) (cherry picked from commit 2716a45dd64e05fcc49763905fd1ea20168c466d) Co-authored-by: Aleksandr Maus --- internal/pkg/es/fleet_global_checkpoints.go | 11 +++++++++++ internal/pkg/monitor/global_checkpoint.go | 1 + 2 files changed, 12 insertions(+) diff --git a/internal/pkg/es/fleet_global_checkpoints.go b/internal/pkg/es/fleet_global_checkpoints.go index 449cc17e6..9c88979a6 100644 --- a/internal/pkg/es/fleet_global_checkpoints.go +++ b/internal/pkg/es/fleet_global_checkpoints.go @@ -47,6 +47,7 @@ type GlobalCheckpointsRequest struct { Index string WaitForAdvance *bool + WaitForIndex *bool Checkpoints []int64 Timeout time.Duration @@ -77,6 +78,10 @@ func (r GlobalCheckpointsRequest) Do(ctx context.Context, transport esapi.Transp params["wait_for_advance"] = strconv.FormatBool(*r.WaitForAdvance) } + if r.WaitForIndex != nil { + params["wait_for_index"] = strconv.FormatBool(*r.WaitForIndex) + } + if len(r.Checkpoints) > 0 { seqNo := sqn.SeqNo(r.Checkpoints) params["checkpoints"] = seqNo.String() @@ -151,6 +156,12 @@ func (f GlobalCheckpoints) WithWaitForAdvance(v bool) func(*GlobalCheckpointsReq } } +func (f GlobalCheckpoints) WithWaitForIndex(v bool) func(*GlobalCheckpointsRequest) { + return func(r *GlobalCheckpointsRequest) { + r.WaitForIndex = &v + } +} + func (f GlobalCheckpoints) WithCheckpoints(checkpoints []int64) func(*GlobalCheckpointsRequest) { return func(r *GlobalCheckpointsRequest) { r.Checkpoints = checkpoints diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go index 56bd598c8..9d08024aa 100644 --- a/internal/pkg/monitor/global_checkpoint.go +++ b/internal/pkg/monitor/global_checkpoint.go @@ -53,6 +53,7 @@ func waitCheckpointAdvance(ctx context.Context, es *elasticsearch.Client, index req.WithIndex(index), req.WithCheckpoints(checkpoint), req.WithWaitForAdvance(true), + req.WithWaitForIndex(true), req.WithTimeout(to), ) From 5fd10c49f88c13e97a3de01bc0f6b81e4e28720e Mon Sep 17 00:00:00 2001 From: Pier-Hugues Pellerin Date: Wed, 21 Apr 2021 11:23:33 -0400 Subject: [PATCH 069/240] Bump to 7.14.0 (#262) Bump to 7.14.0 --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 5deedb5a6..e3932fc2a 100644 --- a/main.go +++ b/main.go @@ -19,7 +19,7 @@ import ( "github.com/elastic/fleet-server/v7/cmd/fleet" ) -const defaultVersion = "7.13.0" +const defaultVersion = "7.14.0" var ( Version string = defaultVersion From dbde638d195247cfab0d43a7eed0dd6bc5d4737e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 23 Apr 2021 12:23:47 +0000 Subject: [PATCH 070/240] Bump elastic stack version automation (#252) (#265) (cherry picked from commit 13681c45439171f24dbb9fa91545b339db7de4a8) # Conflicts: # .mergify.yml Co-authored-by: Victor Martinez --- .ci/bump-stack-version.sh | 40 +++++++++++++++++++++ .gitignore | 2 ++ .mergify.yml | 74 +++++++++++++++++++++++++++++++++++++++ 3 files changed, 116 insertions(+) create mode 100755 .ci/bump-stack-version.sh create mode 100644 
.mergify.yml diff --git a/.ci/bump-stack-version.sh b/.ci/bump-stack-version.sh new file mode 100755 index 000000000..2d61ca8a9 --- /dev/null +++ b/.ci/bump-stack-version.sh @@ -0,0 +1,40 @@ +#!/usr/bin/env bash +# +# Given the stack version this script will bump the version. +# +# This script is executed by the automation we are putting in place +# and it requires the git add/commit commands. +# +# Parameters: +# $1 -> the version to be bumped. Mandatory. +# $2 -> whether to create a branch where to commit the changes to. +# this is required when reusing an existing Pull Request. +# Optional. Default true. +# +set -euo pipefail +MSG="parameter missing." +VERSION=${1:?$MSG} +CREATE_BRANCH=${2:-true} + +OS=$(uname -s| tr '[:upper:]' '[:lower:]') + +if [ "${OS}" == "darwin" ] ; then + SED="sed -i .bck" +else + SED="sed -i" +fi + +echo "Update stack with version ${VERSION}" +${SED} -E -e "s#(ELASTICSEARCH_VERSION)=[0-9]+\.[0-9]+\.[0-9]+(-[a-f0-9]{8})?#\1=${VERSION}#g" dev-tools/integration/.env + +echo "Commit changes" +if [ "$CREATE_BRANCH" = "true" ]; then + git checkout -b "update-stack-version-$(date "+%Y%m%d%H%M%S")" +else + echo "Branch creation disabled." +fi +git add dev-tools/integration/.env +git diff --staged --quiet || git commit -m "bump stack version ${VERSION}" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/.gitignore b/.gitignore index 30820b819..2f491dea8 100644 --- a/.gitignore +++ b/.gitignore @@ -11,3 +11,5 @@ fleet_server fleet-server.dev.yml *.log *.log.* + +dev-tools/integration/.env.bck \ No newline at end of file diff --git a/.mergify.yml b/.mergify.yml new file mode 100644 index 000000000..aaa5eb576 --- /dev/null +++ b/.mergify.yml @@ -0,0 +1,74 @@ +pull_request_rules: + - name: ask to resolve conflict + conditions: + - conflict + actions: + comment: + message: | + This pull request is now in conflicts. Could you fix it @{{author}}? 🙏 + To fixup this pull request, you can check out it locally. See documentation: https://help.github.com/articles/checking-out-pull-requests-locally/ + ``` + git fetch upstream + git checkout -b {{head}} upstream/{{head}} + git merge upstream/{{base}} + git push upstream {{head}} + ``` + - name: backport patches to 7.x branch + conditions: + - merged + - base=master + - label=v7.14.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.x" + labels: + - "backport" + - name: backport patches to 7.13 branch + conditions: + - merged + - base=master + - label=v7.13.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.13" + labels: + - "backport" + - name: backport patches to 7.12 branch + conditions: + - merged + - base=master + - label=v7.12.0 + actions: + backport: + assignees: + - "{{ author }}" + branches: + - "7.12" + labels: + - "backport" + - name: automatic merge for 7\. branches when CI passes + conditions: + - check-success=fleet-server/pr-merge + - check-success=CLA + - base~=^7\. + - label=backport + - author=mergify[bot] + actions: + merge: + method: squash + strict: smart+fasttrack + - name: automatic merge when CI passes and the file dev-tools/integration/.env is modified. 
+ conditions: + - check-success=fleet-server/pr-merge + - label=automation + - files~=^dev-tools/integration/.env$ + actions: + merge: + method: squash + strict: smart+fasttrack From c60f83236375aa9f57b6d1fa3c1e442fe1899ee2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 23 Apr 2021 14:24:19 +0000 Subject: [PATCH 071/240] Improve log message about degraded being okay during bootstrap. (#268) (#269) (cherry picked from commit 94ee14ca749de181ae57622d9bd67aa7ffe598d9) Co-authored-by: Blake Rouse --- internal/pkg/policy/self.go | 2 +- internal/pkg/policy/self_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go index 39885eada..ce969407f 100644 --- a/internal/pkg/policy/self.go +++ b/internal/pkg/policy/self.go @@ -212,7 +212,7 @@ func (m *selfMonitorT) updateStatus(ctx context.Context) (proto.StateObserved_St var payload map[string]interface{} if m.fleet.Agent.ID == "" { status = proto.StateObserved_DEGRADED - extendMsg = "; missing config fleet.agent.id" + extendMsg = "; missing config fleet.agent.id (expected during bootstrap process)" // Elastic Agent has not been enrolled; Fleet Server passes back the enrollment token so the Elastic Agent // can perform enrollment. diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go index 5311c4ac4..2a4dfefd2 100644 --- a/internal/pkg/policy/self_test.go +++ b/internal/pkg/policy/self_test.go @@ -262,7 +262,7 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { if status != proto.StateObserved_DEGRADED { return fmt.Errorf("should be reported as degraded; instead its %s", status) } - if msg != "Running on default policy with Fleet Server integration; missing config fleet.agent.id" { + if msg != "Running on default policy with Fleet Server integration; missing config fleet.agent.id (expected during bootstrap process)" { return fmt.Errorf("should be matching with default policy") } if payload == nil { @@ -520,7 +520,7 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { if status != proto.StateObserved_DEGRADED { return fmt.Errorf("should be reported as degraded; instead its %s", status) } - if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s; missing config fleet.agent.id", policyId) { + if msg != fmt.Sprintf("Running on policy with Fleet Server integration: %s; missing config fleet.agent.id (expected during bootstrap process)", policyId) { return fmt.Errorf("should be matching with specific policy") } if payload == nil { From bc6af80c955599e86ccc044ece6db5668b815171 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 27 Apr 2021 11:58:13 +0000 Subject: [PATCH 072/240] [mergify]: delete branch related to automation after it gets merged and commit messages (#276) (#281) (cherry picked from commit 541def17912865ad74828eec5c150617a0d75691) Co-authored-by: Victor Martinez --- .ci/bump-stack-version.sh | 5 +++-- .ci/jobs/fleet-server.yml | 1 + .mergify.yml | 8 ++++++++ 3 files changed, 12 insertions(+), 2 deletions(-) diff --git a/.ci/bump-stack-version.sh b/.ci/bump-stack-version.sh index 2d61ca8a9..7caaefdf6 100755 --- a/.ci/bump-stack-version.sh +++ b/.ci/bump-stack-version.sh @@ -29,12 +29,13 @@ ${SED} -E -e "s#(ELASTICSEARCH_VERSION)=[0-9]+\.[0-9]+\.[0-9]+(-[a-f0-9]{8})?#\1 echo "Commit changes" if [ "$CREATE_BRANCH" = "true" ]; then - git checkout -b "update-stack-version-$(date 
"+%Y%m%d%H%M%S")" + base=$(git rev-parse --abbrev-ref HEAD | sed 's#/#-#g') + git checkout -b "update-stack-version-$(date "+%Y%m%d%H%M%S")-${base}" else echo "Branch creation disabled." fi git add dev-tools/integration/.env -git diff --staged --quiet || git commit -m "bump stack version ${VERSION}" +git diff --staged --quiet || git commit -m "[Automation] Update elastic stack version to ${VERSION} for testing" git --no-pager log -1 echo "You can now push and create a Pull Request" diff --git a/.ci/jobs/fleet-server.yml b/.ci/jobs/fleet-server.yml index 1379e8670..fbb3df579 100644 --- a/.ci/jobs/fleet-server.yml +++ b/.ci/jobs/fleet-server.yml @@ -13,6 +13,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true + head-filter-regex: '^(?!update-stack-version).*$' notification-context: 'fleet-server' repo: fleet-server repo-owner: elastic diff --git a/.mergify.yml b/.mergify.yml index aaa5eb576..b5355bf98 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -72,3 +72,11 @@ pull_request_rules: merge: method: squash strict: smart+fasttrack + - name: delete upstream branch after merging changes on dev-tools/integration/.env + conditions: + - merged + - label=automation + - base~=^update-stack-version + - files~=^dev-tools/integration/.env$ + actions: + delete_head_branch: From 5615e97aa832b1fceb66edd95bb3201533e12302 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 27 Apr 2021 15:12:58 +0000 Subject: [PATCH 073/240] Remove the UUID checks. (#286) (#287) (cherry picked from commit 868367ac2f62ceed6e79d8bd5e493b5618e63406) Co-authored-by: Blake Rouse --- internal/pkg/policy/monitor.go | 4 ---- internal/pkg/policy/revision.go | 5 ----- 2 files changed, 9 deletions(-) diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index e285f2ce4..42ad76634 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -12,7 +12,6 @@ import ( "sync/atomic" "time" - "github.com/gofrs/uuid" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -301,9 +300,6 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) []subT { // Subscribe creates a new subscription for a policy update. func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, coordinatorIdx int64) (Subscription, error) { - if _, err := uuid.FromString(policyId); err != nil { - return nil, errors.New("policyId must be a UUID") - } if revisionIdx < 0 { return nil, errors.New("revisionIdx must be greater than or equal to 0") } diff --git a/internal/pkg/policy/revision.go b/internal/pkg/policy/revision.go index 11ff34380..506ce5462 100644 --- a/internal/pkg/policy/revision.go +++ b/internal/pkg/policy/revision.go @@ -10,8 +10,6 @@ import ( "strings" "github.com/elastic/fleet-server/v7/internal/pkg/model" - - "github.com/gofrs/uuid" ) // Revision is a policy revision that is sent as an action ID to an agent. 
@@ -39,9 +37,6 @@ func RevisionFromString(actionId string) (Revision, bool) { if split[0] != "policy" { return Revision{}, false } - if _, err := uuid.FromString(split[1]); err != nil { - return Revision{}, false - } revIdx, err := strconv.ParseInt(split[2], 10, 64) if err != nil { return Revision{}, false From b1dfb2c52a7da732d891efee13d315bebc9db5d0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 27 Apr 2021 18:30:23 +0000 Subject: [PATCH 074/240] Report failure running under Elastic Agent (#290) (#292) * In agent-mode report the failure and restart FleetServer. * Add error to prefix the log message. * Simplify the retry logic. * Fix issues with restarting FleetServer and metrics. * Fix format. (cherry picked from commit 214e1650422393d1b870609950ccd1782a72f8da) Co-authored-by: Blake Rouse --- cmd/fleet/main.go | 25 +++++++++++++++++++++---- cmd/fleet/metrics.go | 10 +++++++--- 2 files changed, 28 insertions(+), 7 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index cc5d1c56e..5b059a0b3 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -11,6 +11,7 @@ import ( "os" "runtime/debug" "sync" + "time" "github.com/elastic/go-ucfg" "github.com/elastic/go-ucfg/yaml" @@ -28,6 +29,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/profile" "github.com/elastic/fleet-server/v7/internal/pkg/reload" "github.com/elastic/fleet-server/v7/internal/pkg/signal" + "github.com/elastic/fleet-server/v7/internal/pkg/sleep" "github.com/elastic/fleet-server/v7/internal/pkg/status" "github.com/elastic/fleet-server/v7/internal/pkg/ver" @@ -41,7 +43,8 @@ import ( ) const ( - kAgentMode = "agent-mode" + kAgentMode = "agent-mode" + kAgentModeRestartLoopDelay = 2 * time.Second ) func installSignalHandler() context.Context { @@ -245,7 +248,21 @@ func (a *AgentMode) Run(ctx context.Context) error { // trigger startChan so OnConfig can continue a.startChan <- struct{}{} - return a.srv.Run(srvCtx) + // keep trying to restart the FleetServer on failure, reporting + // the status back to Elastic Agent + res := make(chan error) + go func() { + for { + err := a.srv.Run(srvCtx) + if err == nil || err == context.Canceled { + res <- err + return + } + // sleep some before calling Run again + sleep.WithContext(srvCtx, kAgentModeRestartLoopDelay) + } + }() + return <-res } func (a *AgentMode) OnConfig(s string) { @@ -452,9 +469,9 @@ func (f *FleetServer) Run(ctx context.Context) error { select { case newCfg = <-f.cfgCh: - log.Debug().Msg("Server configuration update") + log.Info().Msg("Server configuration update") case err := <-ech: - f.reporter.Status(proto.StateObserved_FAILED, err.Error(), nil) + f.reporter.Status(proto.StateObserved_FAILED, fmt.Sprintf("Error - %s", err), nil) log.Error().Err(err).Msg("Fleet Server failed") return err case <-ctx.Done(): diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index a4abb56fd..3287898c1 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -36,9 +36,12 @@ var ( func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { registry := monitoring.GetNamespace("info").GetRegistry() - monitoring.NewString(registry, "version").Set(f.ver) - monitoring.NewString(registry, "name").Set("fleet-server") - metrics.SetupMetrics("fleet-server") + if registry.Get("version") == nil { + monitoring.NewString(registry, "version").Set(f.ver) + } + if registry.Get("name") == nil { + monitoring.NewString(registry, 
"name").Set("fleet-server") + } if !cfg.HTTP.Enabled { return nil, nil @@ -83,6 +86,7 @@ func (rt *routeStats) Register(registry *monitoring.Registry) { } func init() { + metrics.SetupMetrics("fleet-server") registry = monitoring.Default.NewRegistry("http_server") cntHttpNew = monitoring.NewUint(registry, "tcp_open") cntHttpClose = monitoring.NewUint(registry, "tcp_close") From eb0e26ae811bb477b534d0bccb473be58af2baaa Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 29 Apr 2021 01:14:28 -0400 Subject: [PATCH 075/240] [Automation] Update elastic stack version to 7.14.0-76235d63 for testing (#301) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index b56cca78c..64b2e3afa 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=8.0.0-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-76235d63-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 994dd6b135a124105e8bd7ee82ae7bee97a1c1bb Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 30 Apr 2021 01:15:11 +0000 Subject: [PATCH 076/240] Reduce the monitor long poll timeout to 4 minutes by default (#306) (#307) (cherry picked from commit e19463de0702dc5b7fc1b9bd5d156aa69d4f3f07) Co-authored-by: Aleksandr Maus --- internal/pkg/config/monitor.go | 2 +- internal/pkg/monitor/monitor.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/internal/pkg/config/monitor.go b/internal/pkg/config/monitor.go index e88e8e09a..d93837878 100644 --- a/internal/pkg/config/monitor.go +++ b/internal/pkg/config/monitor.go @@ -8,7 +8,7 @@ import "time" const ( defaultFetchSize = 1000 - defaultPollTimeout = 5 * time.Minute + defaultPollTimeout = 4 * time.Minute ) type Monitor struct { diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index e109f83ee..8bd4ee27c 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -24,7 +24,7 @@ import ( ) const ( - defaultPollTimeout = 5 * time.Minute // default long poll timeout + defaultPollTimeout = 4 * time.Minute // default long poll timeout defaultSeqNo = int64(-1) // the _seq_no in elasticsearch start with 0 defaultWithExpiration = false @@ -244,7 +244,7 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { continue } else { // Log the error and keep trying - m.log.Error().Err(err).Msg("failed on waiting for global checkpoints advance") + m.log.Info().Err(err).Msg("failed on waiting for global checkpoints advance") } // Delay next attempt From f4d1ac20648180aafc2b882e69b44d03fc05f6be Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 30 Apr 2021 01:14:37 -0400 Subject: [PATCH 077/240] [Automation] Update elastic stack version to 7.14.0-c9bfc196 for testing (#310) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 64b2e3afa..814070f1f 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-76235d63-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-c9bfc196-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme 
TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 345b520470a0bcce96673b6c8e8a645153a21eae Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 30 Apr 2021 08:52:27 +0000 Subject: [PATCH 078/240] mergify: fix delete_head_branch condition (#312) (#313) Check that the name of "head" matches, rather than "base". (cherry picked from commit e36c0971baefb7a25a4ae2c65fa4b297776316e1) Co-authored-by: Victor Martinez --- .mergify.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.mergify.yml b/.mergify.yml index b5355bf98..8da8ccb87 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -76,7 +76,7 @@ pull_request_rules: conditions: - merged - label=automation - - base~=^update-stack-version + - head~=^update-stack-version - files~=^dev-tools/integration/.env$ actions: delete_head_branch: From 0803a30813375a3cecc444c35a132e558a9226c5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 3 May 2021 01:13:30 -0400 Subject: [PATCH 079/240] [Automation] Update elastic stack version to 7.14.0-83b532ee for testing (#316) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 814070f1f..444a80cbd 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-c9bfc196-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-83b532ee-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From eb23e3b7192f083f1757af8c0ad129224bedcbb3 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 4 May 2021 01:22:36 -0400 Subject: [PATCH 080/240] [Automation] Update elastic stack version to 7.14.0-90e98b59 for testing (#321) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 444a80cbd..beb4cba84 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-83b532ee-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-90e98b59-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 25c227c88e5c7da86a56b6d45f247ffdc930561d Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 4 May 2021 23:09:45 +0000 Subject: [PATCH 081/240] Add error response to be JSON (#324) (#325) * Return proper HTTP error response for Elastic Agent. * Add header. * Add to tests. * Change error message. 
* Update cmd/fleet/metrics.go Co-authored-by: Nicolas Ruflin Co-authored-by: Nicolas Ruflin (cherry picked from commit d19227c2b6c605f7404bee365ae5af29863684eb) Co-authored-by: Blake Rouse --- cmd/fleet/error.go | 28 +++++++++++++++++++++++ cmd/fleet/handleAck.go | 6 +++-- cmd/fleet/handleArtifacts.go | 6 +++-- cmd/fleet/handleCheckin.go | 6 +++-- cmd/fleet/handleEnroll.go | 8 ++++--- cmd/fleet/metrics.go | 33 ++++++++++++++++++++++++---- cmd/fleet/server_integration_test.go | 25 ++++++++++++++++++--- 7 files changed, 96 insertions(+), 16 deletions(-) create mode 100644 cmd/fleet/error.go diff --git a/cmd/fleet/error.go b/cmd/fleet/error.go new file mode 100644 index 000000000..c51112449 --- /dev/null +++ b/cmd/fleet/error.go @@ -0,0 +1,28 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package fleet + +import ( + "encoding/json" + "net/http" +) + +type errResp struct { + StatusCode int `json:"statusCode"` + Error string `json:"error"` + Message string `json:"message"` +} + +func WriteError(w http.ResponseWriter, code int, errStr string, msg string) error { + data, err := json.Marshal(&errResp{StatusCode: code, Error: errStr, Message: msg}) + if err != nil { + return err + } + w.Header().Set("Content-Type", "application/json; charset=utf-8") + w.Header().Set("X-Content-Type-Options", "nosniff") + w.WriteHeader(code) + w.Write(data) + return nil +} diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 608c91b0e..1cf0e8c00 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -54,14 +54,16 @@ func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httproute err := rt.ack.handleAcks(w, r, id) if err != nil { - code, lvl := cntAcks.IncError(err) + code, str, msg, lvl := cntAcks.IncError(err) log.WithLevel(lvl). Err(err). Int("code", code). Msg("Fail ACK") - http.Error(w, "", code) + if err := WriteError(w, code, str, msg); err != nil { + log.Error().Err(err).Msg("fail writing error response") + } } } diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 153862363..47a9f09a7 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -92,7 +92,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http } if err != nil { - code, lvl := cntArtifacts.IncError(err) + code, str, msg, lvl := cntArtifacts.IncError(err) zlog.WithLevel(lvl). Err(err). @@ -101,7 +101,9 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http Dur("rtt", time.Since(start)). Msg("Fail handle artifact") - http.Error(w, "", code) + if err := WriteError(w, code, str, msg); err != nil { + log.Error().Err(err).Msg("fail writing error response") + } } } diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index d1a7e4dda..11d63075a 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -49,7 +49,7 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro err := rt.ct._handleCheckin(w, r, id, rt.bulker) if err != nil { - code, lvl := cntCheckin.IncError(err) + code, str, msg, lvl := cntCheckin.IncError(err) // Log this as warn for visibility that limit has been reached. // This allows customers to tune the configuration on detection of threshold. 
@@ -63,7 +63,9 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro Int("code", code). Msg("fail checkin") - http.Error(w, "", code) + if err := WriteError(w, code, str, msg); err != nil { + log.Error().Err(err).Msg("fail writing error response") + } } } diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 06ff76d28..37ac6a9b9 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -75,7 +75,7 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou data, err := rt.et.handleEnroll(r) if err != nil { - code, lvl := cntEnroll.IncError(err) + code, str, msg, lvl := cntEnroll.IncError(err) log.WithLevel(lvl). Err(err). @@ -84,13 +84,15 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou Dur("tdiff", time.Since(start)). Msg("Enroll fail") - http.Error(w, "", code) + if err := WriteError(w, code, str, msg); err != nil { + log.Error().Err(err).Msg("fail writing error response") + } return } var numWritten int if numWritten, err = w.Write(data); err != nil { - log.Error().Err(err).Msg("Fail send enroll response") + log.Error().Err(err).Msg("fail send enroll response") } cntEnroll.bodyOut.Add(uint64(numWritten)) diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index 3287898c1..26a650661 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -101,29 +101,50 @@ func init() { } // Increment error metric, log and return code -func (rt *routeStats) IncError(err error) (int, zerolog.Level) { +func (rt *routeStats) IncError(err error) (int, string, string, zerolog.Level) { lvl := zerolog.DebugLevel incFail := true var code int + var errStr string + var msgStr string switch err { case ErrAgentNotFound: + errStr = "AgentNotFound" + msgStr = "agent could not be found" code = http.StatusNotFound lvl = zerolog.WarnLevel case limit.ErrRateLimit: + errStr = "RateLimit" + msgStr = "exceeded the rate limit" code = http.StatusTooManyRequests rt.rateLimit.Inc() incFail = false case limit.ErrMaxLimit: + errStr = "MaxLimit" + msgStr = "exceeded the max limit" code = http.StatusTooManyRequests rt.maxLimit.Inc() incFail = false case context.Canceled: + errStr = "ServiceUnavailable" + msgStr = "server is stopping" code = http.StatusServiceUnavailable rt.drop.Inc() incFail = false + case ErrInvalidUserAgent: + errStr = "InvalidUserAgent" + msgStr = "user-agent is invalid" + code = http.StatusBadRequest + lvl = zerolog.InfoLevel + case ErrUnsupportedVersion: + errStr = "UnsupportedVersion" + msgStr = "version is not supported" + code = http.StatusBadRequest + lvl = zerolog.InfoLevel default: + errStr = "BadRequest" lvl = zerolog.InfoLevel code = http.StatusBadRequest } @@ -132,7 +153,7 @@ func (rt *routeStats) IncError(err error) (int, zerolog.Level) { cntCheckin.failure.Inc() } - return code, lvl + return code, errStr, msgStr, lvl } func (rt *routeStats) IncStart() func() { @@ -153,20 +174,24 @@ func (rt *artifactStats) Register(registry *monitoring.Registry) { rt.throttle = monitoring.NewUint(registry, "throttle") } -func (rt *artifactStats) IncError(err error) (code int, lvl zerolog.Level) { +func (rt *artifactStats) IncError(err error) (code int, str string, msg string, lvl zerolog.Level) { switch err { case dl.ErrNotFound: // Artifact not found indicates a race condition upstream // or an attack on the fleet server. 
Either way it should // show up in the logs at a higher level than debug code = http.StatusNotFound + str = "NotFound" + msg = "not found" rt.notFound.Inc() lvl = zerolog.WarnLevel case ErrorThrottle: code = http.StatusTooManyRequests + str = "TooManyRequests" + msg = "too many requests" rt.throttle.Inc() default: - code, lvl = rt.routeStats.IncError(err) + code, str, msg, lvl = rt.routeStats.IncError(err) } return diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 9ea09f835..5cd779f49 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -9,8 +9,8 @@ package fleet import ( "bytes" "context" + "encoding/json" "fmt" - "github.com/elastic/fleet-server/v7/internal/pkg/status" "io/ioutil" "net/http" "path" @@ -28,6 +28,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" + "github.com/elastic/fleet-server/v7/internal/pkg/status" ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) @@ -171,7 +172,16 @@ func TestServerUnauthorized(t *testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - diff = cmp.Diff("\n", string(raw)) + var resp errResp + err = json.Unmarshal(raw, &resp) + if err != nil { + t.Fatal(err) + } + diff = cmp.Diff(400, resp.StatusCode) + if diff != "" { + t.Fatal(diff) + } + diff = cmp.Diff("BadRequest", resp.Error) if diff != "" { t.Fatal(diff) } @@ -197,7 +207,16 @@ func TestServerUnauthorized(t *testing.T) { } raw, _ := ioutil.ReadAll(res.Body) - diff = cmp.Diff("\n", string(raw)) + var resp errResp + err = json.Unmarshal(raw, &resp) + if err != nil { + t.Fatal(err) + } + diff = cmp.Diff(400, resp.StatusCode) + if diff != "" { + t.Fatal(diff) + } + diff = cmp.Diff("BadRequest", resp.Error) if diff != "" { t.Fatal(diff) } From 86a8e5fa03514d4ca91a303f979c078eabbc081a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 6 May 2021 01:14:02 -0400 Subject: [PATCH 082/240] [Automation] Update elastic stack version to 7.14.0-5b4d7537 for testing (#330) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index beb4cba84..3abe65861 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-90e98b59-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-5b4d7537-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 8554255daa73f0354c3071fea45d0dc9b58c0939 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 7 May 2021 01:16:42 -0400 Subject: [PATCH 083/240] [Automation] Update elastic stack version to 7.14.0-8ba10577 for testing (#334) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3abe65861..aed98adb2 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-5b4d7537-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-8ba10577-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 607b16958c3f76ae0790a0b88b7fda536d95776f 
Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 10 May 2021 01:13:24 -0400 Subject: [PATCH 084/240] [Automation] Update elastic stack version to 7.14.0-7d5ea6dc for testing (#337) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index aed98adb2..f63d03e77 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-8ba10577-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-7d5ea6dc-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 74742a3f87fa125236d6e2b5cbb59efa3e4d3db8 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 11 May 2021 01:16:37 -0400 Subject: [PATCH 085/240] [Automation] Update elastic stack version to 7.14.0-e8048b9e for testing (#339) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index f63d03e77..32f67c857 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-7d5ea6dc-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-e8048b9e-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 91ef4f1ed8cce84c4f9bf44b3ccd081d5c467457 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 12 May 2021 12:44:40 +0000 Subject: [PATCH 086/240] [7.x](backport #319) Refactor bulk API to minimize allocations and increase speed. (#342) * Refactor bulk API to minimize allocations and increase speed. 
(cherry picked from commit 5f84562d58d087ede3291db890877d4e8ea30979) * Rename checkin.BulkCheckin to checkin.Bulk nits (cherry picked from commit bdb9296a60aab2e93bdfdb2c9f75e3c0c8dc3a7f) Co-authored-by: Sean Cunningham --- NOTICE.txt | 95 +- cmd/fleet/bulkCheckin.go | 124 -- cmd/fleet/handleArtifacts.go | 2 +- cmd/fleet/handleCheckin.go | 62 +- cmd/fleet/main.go | 3 +- cmd/fleet/schema.go | 5 - cmd/fleet/server_test.go | 3 +- go.mod | 2 + go.sum | 7 +- internal/pkg/bulk/block.go | 81 ++ internal/pkg/bulk/bulk.go | 822 ----------- internal/pkg/bulk/bulk_integration_test.go | 438 ++++++ internal/pkg/bulk/bulk_test.go | 382 +++++ internal/pkg/bulk/engine.go | 429 ++++++ internal/pkg/bulk/helpers.go | 43 + internal/pkg/bulk/multi.go | 103 -- internal/pkg/bulk/opBulk.go | 265 ++++ internal/pkg/bulk/opMulti.go | 127 ++ internal/pkg/bulk/opMulti_integration_test.go | 85 ++ internal/pkg/bulk/opMulti_test.go | 61 + internal/pkg/bulk/opRead.go | 155 ++ internal/pkg/bulk/opSearch.go | 185 +++ internal/pkg/bulk/opt.go | 24 +- internal/pkg/bulk/queue.go | 39 + internal/pkg/bulk/schema.go | 50 +- internal/pkg/bulk/schema_easyjson.go | 1261 +++++++++++++++++ internal/pkg/bulk/setup_test.go | 168 +++ internal/pkg/checkin/bulk.go | 233 +++ internal/pkg/checkin/bulk_test.go | 209 +++ internal/pkg/danger/buf.go | 83 ++ internal/pkg/danger/buf_test.go | 43 + .../pkg/dl/action_results_integration_test.go | 2 +- internal/pkg/dl/constants.go | 2 + internal/pkg/dl/policies.go | 2 +- internal/pkg/dl/policies_leader.go | 2 +- internal/pkg/dl/search.go | 4 +- internal/pkg/es/delete.go | 2 +- internal/pkg/es/error.go | 9 +- internal/pkg/es/info.go | 2 +- internal/pkg/monitor/global_checkpoint.go | 2 +- internal/pkg/monitor/monitor.go | 2 +- internal/pkg/testing/bulk.go | 20 +- internal/pkg/ver/check.go | 6 +- 43 files changed, 4521 insertions(+), 1123 deletions(-) delete mode 100644 cmd/fleet/bulkCheckin.go create mode 100644 internal/pkg/bulk/block.go delete mode 100644 internal/pkg/bulk/bulk.go create mode 100644 internal/pkg/bulk/bulk_integration_test.go create mode 100644 internal/pkg/bulk/bulk_test.go create mode 100644 internal/pkg/bulk/engine.go create mode 100644 internal/pkg/bulk/helpers.go delete mode 100644 internal/pkg/bulk/multi.go create mode 100644 internal/pkg/bulk/opBulk.go create mode 100644 internal/pkg/bulk/opMulti.go create mode 100644 internal/pkg/bulk/opMulti_integration_test.go create mode 100644 internal/pkg/bulk/opMulti_test.go create mode 100644 internal/pkg/bulk/opRead.go create mode 100644 internal/pkg/bulk/opSearch.go create mode 100644 internal/pkg/bulk/queue.go create mode 100644 internal/pkg/bulk/schema_easyjson.go create mode 100644 internal/pkg/bulk/setup_test.go create mode 100644 internal/pkg/checkin/bulk.go create mode 100644 internal/pkg/checkin/bulk_test.go create mode 100644 internal/pkg/danger/buf.go create mode 100644 internal/pkg/danger/buf_test.go diff --git a/NOTICE.txt b/NOTICE.txt index b90a30a41..6f4478d1e 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -9,6 +9,36 @@ Third party libraries used by the Elastic Beats project: ================================================================================ +-------------------------------------------------------------------------------- +Dependency : github.com/Pallinder/go-randomdata +Version: v1.2.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/!pallinder/go-randomdata@v1.2.0/LICENSE: + 
+The MIT License (MIT) + +Copyright (c) 2013 David Pallinder + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/aleksmaus/generate Version: v0.0.0-20210326194607-c630e07a2742 @@ -2119,6 +2149,23 @@ OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : github.com/mailru/easyjson +Version: v0.7.7 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.7/LICENSE: + +Copyright (c) 2016 Mail.Ru Group + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/miolini/datacounter Version: v1.0.2 @@ -25696,6 +25743,37 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
+-------------------------------------------------------------------------------- +Dependency : github.com/josharian/intern +Version: v1.0.0 +Licence type (autodetected): MIT +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/josharian/intern@v1.0.0/license.md: + +MIT License + +Copyright (c) 2019 Josh Bleecher Snyder + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. + + -------------------------------------------------------------------------------- Dependency : github.com/jpillora/backoff Version: v1.0.0 @@ -26617,23 +26695,6 @@ ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. --------------------------------------------------------------------------------- -Dependency : github.com/mailru/easyjson -Version: v0.7.1 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/mailru/easyjson@v0.7.1/LICENSE: - -Copyright (c) 2016 Mail.Ru Group - -Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/markbates/pkger Version: v0.17.0 diff --git a/cmd/fleet/bulkCheckin.go b/cmd/fleet/bulkCheckin.go deleted file mode 100644 index adb4916cd..000000000 --- a/cmd/fleet/bulkCheckin.go +++ /dev/null @@ -1,124 +0,0 @@ -// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package fleet - -import ( - "context" - "encoding/json" - "sync" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/bulk" - "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - - "github.com/rs/zerolog/log" -) - -type Fields map[string]interface{} - -const kBulkCheckinFlushInterval = 10 * time.Second - -type PendingData struct { - fields Fields - seqNo sqn.SeqNo -} - -type BulkCheckin struct { - bulker bulk.Bulk - mut sync.Mutex - pending map[string]PendingData -} - -func NewBulkCheckin(bulker bulk.Bulk) *BulkCheckin { - return &BulkCheckin{ - bulker: bulker, - pending: make(map[string]PendingData), - } -} - -func (bc *BulkCheckin) CheckIn(id string, fields Fields, seqno sqn.SeqNo) error { - - if fields == nil { - fields = make(Fields) - } - - timeNow := time.Now().UTC().Format(time.RFC3339) - fields[FieldLastCheckin] = timeNow - - bc.mut.Lock() - bc.pending[id] = PendingData{fields, seqno} - bc.mut.Unlock() - return nil -} - -func (bc *BulkCheckin) Run(ctx context.Context) error { - - tick := time.NewTicker(kBulkCheckinFlushInterval) - - var err error -LOOP: - for { - select { - case <-tick.C: - if err = bc.flush(ctx); err != nil { - log.Error().Err(err).Msg("Eat bulk checkin error; Keep on truckin'") - err = nil - } - - case <-ctx.Done(): - err = ctx.Err() - break LOOP - } - } - - return err -} - -func (bc *BulkCheckin) flush(ctx context.Context) error { - start := time.Now() - - bc.mut.Lock() - pending := bc.pending - bc.pending = make(map[string]PendingData, len(pending)) - bc.mut.Unlock() - - if len(pending) == 0 { - return nil - } - - updates := make([]bulk.BulkOp, 0, len(pending)) - - for id, pendingData := range pending { - doc := pendingData.fields - doc[dl.FieldUpdatedAt] = time.Now().UTC().Format(time.RFC3339) - if pendingData.seqNo.IsSet() { - doc[dl.FieldActionSeqNo] = pendingData.seqNo - } - - source, err := json.Marshal(map[string]interface{}{ - "doc": doc, - }) - - if err != nil { - return err - } - - updates = append(updates, bulk.BulkOp{ - Id: id, - Body: source, - Index: dl.FleetAgents, - }) - } - - err := bc.bulker.MUpdate(ctx, updates, bulk.WithRefresh()) - log.Trace(). - Err(err). - Dur("rtt", time.Since(start)). - Int("cnt", len(updates)). - Msg("Flush updates") - - return err -} diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 47a9f09a7..b7ea7acf8 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -94,7 +94,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http if err != nil { code, str, msg, lvl := cntArtifacts.IncError(err) - zlog.WithLevel(lvl). + log.WithLevel(lvl). Err(err). Int("code", code). Int64("nWritten", nWritten). 
diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 11d63075a..b53ab2110 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/action" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/limit" @@ -73,7 +74,7 @@ type CheckinT struct { verCon version.Constraints cfg *config.Server cache cache.Cache - bc *BulkCheckin + bc *checkin.Bulk pm policy.Monitor gcp monitor.GlobalCheckpointProvider ad *action.Dispatcher @@ -86,7 +87,7 @@ func NewCheckinT( verCon version.Constraints, cfg *config.Server, c cache.Cache, - bc *BulkCheckin, + bc *checkin.Bulk, pm policy.Monitor, gcp monitor.GlobalCheckpointProvider, ad *action.Dispatcher, @@ -153,7 +154,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st cntCheckin.bodyIn.Add(readCounter.Count()) // Compare local_metadata content and update if different - fields, err := parseMeta(agent, &req) + rawMeta, err := parseMeta(agent, &req) if err != nil { return err } @@ -185,7 +186,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed - ct.bc.CheckIn(agent.Id, fields, seqno) + ct.bc.CheckIn(agent.Id, rawMeta, seqno) // Initial fetch for pending actions var ( @@ -222,7 +223,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st log.Trace().Msg("fire long poll") break LOOP case <-tick.C: - ct.bc.CheckIn(agent.Id, nil, seqno) + ct.bc.CheckIn(agent.Id, nil, nil) } } } @@ -513,31 +514,50 @@ func findAgentByApiKeyId(ctx context.Context, bulker bulk.Bulk, id string) (*mod // parseMeta compares the agent and the request local_metadata content // and returns fields to update the agent record or nil -func parseMeta(agent *model.Agent, req *CheckinRequest) (fields Fields, err error) { - // Quick comparison first +func parseMeta(agent *model.Agent, req *CheckinRequest) ([]byte, error) { + + // Quick comparison first; compare the JSON payloads. + // If the data is not consistently normalized, this short-circuit will not work. 
if bytes.Equal(req.LocalMeta, agent.LocalMetadata) { log.Trace().Msg("quick comparing local metadata is equal") return nil, nil } - // Compare local_metadata content and update if different - var reqLocalMeta Fields - var agentLocalMeta Fields - err = json.Unmarshal(req.LocalMeta, &reqLocalMeta) - if err != nil { + // Deserialize the request metadata + var reqLocalMeta interface{} + if err := json.Unmarshal(req.LocalMeta, &reqLocalMeta); err != nil { return nil, err } - err = json.Unmarshal(agent.LocalMetadata, &agentLocalMeta) - if err != nil { + + // If empty, don't step on existing data + if reqLocalMeta == nil { + return nil, nil + } + + // Deserialize the agent's metadata copy + var agentLocalMeta interface{} + if err := json.Unmarshal(agent.LocalMetadata, &agentLocalMeta); err != nil { return nil, err } - if reqLocalMeta != nil && !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { - log.Trace().RawJSON("oldLocalMeta", agent.LocalMetadata).RawJSON("newLocalMeta", req.LocalMeta).Msg("local metadata not equal") - log.Info().RawJSON("req.LocalMeta", req.LocalMeta).Msg("applying new local metadata") - fields = map[string]interface{}{ - FieldLocalMetadata: req.LocalMeta, - } + var outMeta []byte + + // Compare the deserialized meta structures and return the bytes to update if different + if !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { + + log.Trace(). + Str("agentId", agent.Id). + RawJSON("oldLocalMeta", agent.LocalMetadata). + RawJSON("newLocalMeta", req.LocalMeta). + Msg("local metadata not equal") + + log.Info(). + Str("agentId", agent.Id). + RawJSON("req.LocalMeta", req.LocalMeta). + Msg("applying new local metadata") + + outMeta = req.LocalMeta } - return fields, nil + + return outMeta, nil } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 5b059a0b3..95b705d5a 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -19,6 +19,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/action" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/coordinator" "github.com/elastic/fleet-server/v7/internal/pkg/dl" @@ -588,7 +589,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er return err } - bc := NewBulkCheckin(bulker) + bc := checkin.NewBulk(bulker) g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) ct := NewCheckinT(f.verCon, &f.cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 7ad4d29ff..e4378345d 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -18,11 +18,6 @@ const ( TypeUpgrade = "UPGRADE" ) -const ( - FieldLastCheckin = "last_checkin" - FieldLocalMetadata = "local_metadata" -) - const kFleetAccessRolesJSON = ` { "fleet-apikey-access": { diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 49bf4ba02..846b078ce 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/require" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/checkin" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/monitor/mock" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -39,7 +40,7 @@ func TestRunServer(t 
*testing.T) { bulker := ftesting.MockBulk{} pim := mock.NewMockIndexMonitor() pm := policy.NewMonitor(bulker, pim, 5*time.Millisecond) - bc := NewBulkCheckin(nil) + bc := checkin.NewBulk(nil) ct := NewCheckinT(verCon, cfg, c, bc, pm, nil, nil, nil, nil) et, err := NewEnrollerT(verCon, cfg, nil, c) require.NoError(t, err) diff --git a/go.mod b/go.mod index 9245ee834..fbde00dfc 100644 --- a/go.mod +++ b/go.mod @@ -3,6 +3,7 @@ module github.com/elastic/fleet-server/v7 go 1.15 require ( + github.com/Pallinder/go-randomdata v1.2.0 github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 github.com/dgraph-io/ristretto v0.0.3 github.com/elastic/beats/v7 v7.11.1 @@ -15,6 +16,7 @@ require ( github.com/hashicorp/go-version v1.3.0 github.com/hashicorp/golang-lru v0.5.2-0.20190520140433-59383c442f7d github.com/julienschmidt/httprouter v1.3.0 + github.com/mailru/easyjson v0.7.7 github.com/miolini/datacounter v1.0.2 github.com/pkg/errors v0.9.1 github.com/rs/xid v1.2.1 diff --git a/go.sum b/go.sum index 8ebda6804..3dd47c8d8 100644 --- a/go.sum +++ b/go.sum @@ -88,6 +88,8 @@ github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAo github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= +github.com/Pallinder/go-randomdata v1.2.0 h1:DZ41wBchNRb/0GfsePLiSwb0PHZmT67XY00lCDlaYPg= +github.com/Pallinder/go-randomdata v1.2.0/go.mod h1:yHmJgulpD2Nfrm0cR9tI/+oAgRqCQQixsA8HyRZfV9Y= github.com/PuerkitoBio/purell v1.0.0 h1:0GoNN3taZV6QI81IXgCbxMyEaJDXMSIjArYBCYzVVvs= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/urlesc v0.0.0-20160726150825-5bd2802263f2 h1:JCHLVE3B+kJde7bIEo5N4J+ZbLhp0J1Fs+ulyRws4gE= @@ -493,6 +495,8 @@ github.com/jonboulle/clockwork v0.2.2 h1:UOGuzwb1PwsrDAObMuhUnj0p5ULPj8V/xJ7Kx9q github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd h1:KikNiFwUO3QLyeKyN4k9yBH9Pcu/gU/yficWi61cJIw= github.com/josephspurrier/goversioninfo v0.0.0-20190209210621-63e6d1acd3dd/go.mod h1:eJTEwMjXb7kZ633hO3Ln9mBUCOjX2+FlTljvpl9SYdE= +github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= +github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= github.com/jpillora/backoff v1.0.0 h1:uvFg412JmmHBHw7iwprIxkPMI+sGQ4kzOWsMeHnm2EA= github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= @@ -543,8 +547,9 @@ github.com/magefile/mage v1.10.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXq github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/mailru/easyjson v0.0.0-20160728113105-d5b7844b561a/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.1 h1:mdxE1MF9o53iCb2Ghj1VfWvh7ZOwHpnVG/xwXrV90U8= github.com/mailru/easyjson v0.7.1/go.mod h1:KAzv3t3aY1NaHWoQz1+4F1ccyAH66Jk7yos7ldAVICs= 
+github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= +github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/markbates/pkger v0.17.0 h1:RFfyBPufP2V6cddUyyEVSHBpaAnM1WzaMNyqomeT+iY= github.com/markbates/pkger v0.17.0/go.mod h1:0JoVlrol20BSywW79rN3kdFFsE5xYM+rSCQDXbLhiuI= github.com/martini-contrib/render v0.0.0-20150707142108-ec18f8345a11 h1:YFh+sjyJTMQSYjKwM4dFKhJPJC/wfo98tPUc17HdoYw= diff --git a/internal/pkg/bulk/block.go b/internal/pkg/bulk/block.go new file mode 100644 index 000000000..671703690 --- /dev/null +++ b/internal/pkg/bulk/block.go @@ -0,0 +1,81 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/danger" +) + +type Buf = danger.Buf + +// bulkT is generally allocated in the bulk engines's 'blkPool' +// However, the multiOp API's will allocate directly in large blocks. + +type bulkT struct { + action actionT // requested actions + flags flagsT // execution flags + idx int32 // idx of originating request, used in mulitOp + ch chan respT // response channel, caller is waiting synchronously + buf Buf // json payload to be sent to elastic + next *bulkT // pointer to next bulkT, used for fast internal queueing +} + +type flagsT int8 + +const ( + flagRefresh flagsT = 1 << iota +) + +func (ft flagsT) Has(f flagsT) bool { + return ft&f != 0 +} + +func (ft *flagsT) Set(f flagsT) { + *ft = *ft | f +} + +type actionT int8 + +const ( + ActionCreate actionT = iota + ActionDelete + ActionIndex + ActionUpdate + ActionRead + ActionSearch +) + +var actionStrings = []string{ + "create", + "delete", + "index", + "update", + "read", + "search", +} + +func (a actionT) String() string { + return actionStrings[a] +} + +func (blk *bulkT) reset() { + blk.action = 0 + blk.flags = 0 + blk.idx = 0 + blk.buf.Reset() + blk.next = nil +} + +func newBlk() interface{} { + return &bulkT{ + ch: make(chan respT, 1), + } +} + +type respT struct { + err error + idx int32 + data interface{} +} diff --git a/internal/pkg/bulk/bulk.go b/internal/pkg/bulk/bulk.go deleted file mode 100644 index de563e18c..000000000 --- a/internal/pkg/bulk/bulk.go +++ /dev/null @@ -1,822 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package bulk - -import ( - "bytes" - "context" - "encoding/json" - "fmt" - "strconv" - "time" - - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" - "github.com/rs/zerolog/log" - "golang.org/x/sync/semaphore" -) - -type BulkOp struct { - Id string - Index string - Body []byte -} - -type Bulk interface { - Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) - Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) - Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error - Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) - // Delete (ctx context.Context, index, id string, opts ...Opt) error - - MUpdate(ctx context.Context, ops []BulkOp, opts ...Opt) error - - Search(ctx context.Context, index []string, body []byte, opts ...Opt) (*es.ResultT, error) - - Client() *elasticsearch.Client -} - -type Action string - -func (a Action) Str() string { return string(a) } - -const ( - ActionCreate Action = "create" - ActionDelete = "delete" - ActionIndex = "index" - ActionUpdate = "update" - ActionRead = "read" - ActionSearch = "search" -) - -const kModBulk = "bulk" - -type respT struct { - idx int - err error - data interface{} -} - -type bulkT struct { - idx int - action Action - ch chan respT - data []byte - opts optionsT -} - -type Bulker struct { - es *elasticsearch.Client - ch chan bulkT -} - -const ( - rPrefix = "{\"docs\": [" - rSuffix = "]}" - - defaultFlushInterval = time.Second * 5 - defaultFlushThresholdCnt = 32768 - defaultFlushThresholdSz = 1024 * 1024 * 10 - defaultMaxPending = 32 - defaultQueuePrealloc = 64 -) - -func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) { - - es, err := es.NewClient(ctx, cfg, false) - if err != nil { - return nil, nil, err - } - - opts = append(opts, - WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval), - WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount), - WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize), - WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending), - ) - - blk := NewBulker(es) - go func() { - err := blk.Run(ctx, opts...) - log.Info().Err(err).Msg("Bulker exit") - }() - - return es, blk, nil -} - -func NewBulker(es *elasticsearch.Client) *Bulker { - return &Bulker{ - es: es, - ch: make(chan bulkT), - } -} - -func (b *Bulker) Client() *elasticsearch.Client { - return b.es -} - -func (b *Bulker) parseBulkOpts(opts ...BulkOpt) bulkOptT { - bopt := bulkOptT{ - flushInterval: defaultFlushInterval, - flushThresholdCnt: defaultFlushThresholdCnt, - flushThresholdSz: defaultFlushThresholdSz, - maxPending: defaultMaxPending, - queuePrealloc: defaultQueuePrealloc, - } - - for _, f := range opts { - f(&bopt) - } - - return bopt -} - -// Stop timer, but don't stall on channel. -// API doesn't not seem to work as specified. -func stopTimer(t *time.Timer) { - if !t.Stop() { - select { - case <-t.C: - default: - } - } -} - -type queueT struct { - action Action - queue []bulkT - pending int -} - -const ( - kQueueBulk = iota - kQueueRead - kQueueSearch - kQueueRefresh - kNumQueues -) - -func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { - var err error - - bopts := b.parseBulkOpts(opts...) 
- - // Create timer in stopped state - timer := time.NewTimer(bopts.flushInterval) - stopTimer(timer) - defer timer.Stop() - - w := semaphore.NewWeighted(int64(bopts.maxPending)) - - queues := make([]*queueT, 0, kNumQueues) - for i := 0; i < kNumQueues; i++ { - var action Action - switch i { - case kQueueRead: - action = ActionRead - case kQueueSearch: - action = ActionSearch - case kQueueBulk, kQueueRefresh: - // Empty action is correct - default: - // Bad programmer - panic("Unknown bulk queue") - } - - queues = append(queues, &queueT{ - action: action, - queue: make([]bulkT, 0, bopts.queuePrealloc), - }) - } - - var itemCnt int - var byteCnt int - - doFlush := func() error { - - for _, q := range queues { - if q.pending > 0 { - if err := b.flushQueue(ctx, w, q.queue, q.pending, q.action); err != nil { - return err - } - - q.pending = 0 - q.queue = make([]bulkT, 0, bopts.queuePrealloc) - } - } - - // Reset threshold counters - itemCnt = 0 - byteCnt = 0 - - stopTimer(timer) - return nil - } - -LOOP: - for err == nil { - - select { - - case item := <-b.ch: - - queueIdx := kQueueBulk - - switch item.action { - case ActionRead: - queueIdx = kQueueRead - case ActionSearch: - queueIdx = kQueueSearch - default: - if item.opts.Refresh { - queueIdx = kQueueRefresh - } - } - - q := queues[queueIdx] - q.queue = append(q.queue, item) - q.pending += len(item.data) - - // Update threshold counters - itemCnt += 1 - byteCnt += len(item.data) - - // Start timer on first queued item - if itemCnt == 1 { - timer.Reset(bopts.flushInterval) - } - - // Threshold test, short circuit timer on pending count - if itemCnt >= bopts.flushThresholdCnt || byteCnt >= bopts.flushThresholdSz { - log.Trace(). - Str("mod", kModBulk). - Int("itemCnt", itemCnt). - Int("byteCnt", byteCnt). - Msg("Flush on threshold") - - err = doFlush() - } - - case <-timer.C: - log.Trace(). - Str("mod", kModBulk). - Int("itemCnt", itemCnt). - Int("byteCnt", byteCnt). - Msg("Flush on timer") - err = doFlush() - - case <-ctx.Done(): - err = ctx.Err() - break LOOP - - } - - } - - return err -} - -func (b *Bulker) flushQueue(ctx context.Context, w *semaphore.Weighted, queue []bulkT, szPending int, action Action) error { - start := time.Now() - log.Trace(). - Str("mod", kModBulk). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Msg("flushQueue Wait") - - if err := w.Acquire(ctx, 1); err != nil { - return err - } - - log.Trace(). - Str("mod", kModBulk). - Dur("tdiff", time.Since(start)). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Msg("flushQueue Acquired") - - go func() { - start := time.Now() - - defer w.Release(1) - - var err error - switch action { - case ActionRead: - err = b.flushRead(ctx, queue, szPending) - case ActionSearch: - err = b.flushSearch(ctx, queue, szPending) - default: - err = b.flushBulk(ctx, queue, szPending) - } - - if err != nil { - failQueue(queue, err) - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Int("szPending", szPending). - Int("sz", len(queue)). - Str("action", action.Str()). - Dur("rtt", time.Since(start)). 
- Msg("flushQueue Done") - - }() - - return nil -} - -func (b *Bulker) flushRead(ctx context.Context, queue []bulkT, szPending int) error { - start := time.Now() - - buf := bytes.NewBufferString(rPrefix) - buf.Grow(szPending + len(rSuffix)) - - // Each item a JSON array element followed by comma - for _, item := range queue { - buf.Write(item.data) - } - - // Need to strip the last element and append the suffix - payload := buf.Bytes() - payload = append(payload[:len(payload)-1], []byte(rSuffix)...) - - // Do actual bulk request; and send response on chan - req := esapi.MgetRequest{ - Body: bytes.NewReader(payload), - } - res, err := req.Do(ctx, b.es) - - if err != nil { - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk MgetResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Dur("rtt", time.Since(start)). - Int("sz", len(blk.Items)). - Msg("flushRead") - - if len(blk.Items) != len(queue) { - return fmt.Errorf("Mget queue length mismatch") - } - - for i, item := range blk.Items { - citem := item - queue[i].ch <- respT{ - idx: queue[i].idx, - err: item.deriveError(), - data: &citem, - } - - } - - return nil -} - -func (b *Bulker) flushSearch(ctx context.Context, queue []bulkT, szPending int) error { - start := time.Now() - - buf := bytes.Buffer{} - buf.Grow(szPending) - - for _, item := range queue { - buf.Write(item.data) - } - - // Do actual bulk request; and send response on chan - req := esapi.MsearchRequest{ - Body: bytes.NewReader(buf.Bytes()), - } - res, err := req.Do(ctx, b.es) - - if err != nil { - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk MsearchResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Str("mod", kModBulk). - Dur("rtt", time.Since(start)). - Int("took", blk.Took). - Int("sz", len(blk.Responses)). 
- Msg("flushSearch") - - if len(blk.Responses) != len(queue) { - return fmt.Errorf("Bulk queue length mismatch") - } - - for i, response := range blk.Responses { - - cResponse := response - queue[i].ch <- respT{ - idx: queue[i].idx, - err: response.deriveError(), - data: &cResponse, - } - } - - return nil -} - -func (b *Bulker) flushBulk(ctx context.Context, queue []bulkT, szPending int) error { - - buf := bytes.Buffer{} - buf.Grow(szPending) - - doRefresh := "false" - for _, item := range queue { - buf.Write(item.data) - if item.opts.Refresh { - doRefresh = "true" - } - } - - // Do actual bulk request; and send response on chan - req := esapi.BulkRequest{ - Body: bytes.NewReader(buf.Bytes()), - Refresh: doRefresh, - } - res, err := req.Do(ctx, b.es) - - if err != nil { - log.Error().Err(err).Str("mod", kModBulk).Msg("Fail req.Do") - return err - } - - if res.Body != nil { - defer res.Body.Close() - } - - if res.IsError() { - log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail result") - return fmt.Errorf("flush: %s", res.String()) // TODO: Wrap error - } - - var blk BulkIndexerResponse - decoder := json.NewDecoder(res.Body) - if err := decoder.Decode(&blk); err != nil { - log.Error().Err(err).Str("mod", kModBulk).Msg("Decode error") - return fmt.Errorf("flush: error parsing response body: %s", err) // TODO: Wrap error - } - - log.Trace(). - Err(err). - Bool("refresh", doRefresh == "true"). - Str("mod", kModBulk). - Int("took", blk.Took). - Bool("hasErrors", blk.HasErrors). - Int("sz", len(blk.Items)). - Msg("flushBulk") - - if len(blk.Items) != len(queue) { - return fmt.Errorf("Bulk queue length mismatch") - } - - for i, blkItem := range blk.Items { - - for _, item := range blkItem { - - select { - case queue[i].ch <- respT{ - idx: queue[i].idx, - err: item.deriveError(), - data: &item, - }: - default: - panic("Should not happen") - } - - break - } - } - - return nil -} - -func failQueue(queue []bulkT, err error) { - for _, i := range queue { - i.ch <- respT{ - idx: i.idx, - err: err, - } - } -} - -func (b *Bulker) parseOpts(opts ...Opt) optionsT { - var opt optionsT - for _, o := range opts { - o(&opt) - } - return opt -} - -func (b *Bulker) Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { - item, err := b.waitBulkAction(ctx, ActionCreate, index, id, body, opts...) - if err != nil { - return "", err - } - - return item.DocumentID, nil -} - -func (b *Bulker) Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { - item, err := b.waitBulkAction(ctx, ActionIndex, index, id, body, opts...) - if err != nil { - return "", err - } - return item.DocumentID, nil -} - -func (b *Bulker) Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error { - _, err := b.waitBulkAction(ctx, ActionUpdate, index, id, body, opts...) - return err -} - -func (b *Bulker) waitBulkAction(ctx context.Context, action Action, index, id string, body []byte, opts ...Opt) (*BulkIndexerResponseItem, error) { - opt := b.parseOpts(opts...) 
- - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(len(body) + kSlop) - - if err := b.writeBulkMeta(&buf, action, index, id, opt); err != nil { - return nil, err - } - - if err := b.writeBulkBody(&buf, body); err != nil { - return nil, err - } - - // Dispatch and wait for response - resp := b.dispatch(ctx, action, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - r := resp.data.(*BulkIndexerResponseItem) - return r, nil -} - -func (b *Bulker) Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) { - opt := b.parseOpts(opts...) - - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(kSlop) - - if err := b.writeMget(&buf, index, id); err != nil { - return nil, err - } - - // Process response - resp := b.dispatch(ctx, ActionRead, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - // Interpret response, looking for generated id - r := resp.data.(*MgetResponseItem) - return r.Source, nil -} - -func (b *Bulker) Search(ctx context.Context, index []string, body []byte, opts ...Opt) (*es.ResultT, error) { - opt := b.parseOpts(opts...) - - // Serialize request - var buf bytes.Buffer - - const kSlop = 64 - buf.Grow(len(body) + kSlop) - - if err := b.writeMsearchMeta(&buf, index); err != nil { - return nil, err - } - - if err := b.writeMsearchBody(&buf, body); err != nil { - return nil, err - } - - // Process response - resp := b.dispatch(ctx, ActionSearch, opt, buf.Bytes()) - if resp.err != nil { - return nil, resp.err - } - - // Interpret response - r := resp.data.(*MsearchResponseItem) - return &es.ResultT{HitsT: r.Hits, Aggregations: r.Aggregations}, nil -} - -func (b *Bulker) writeMsearchMeta(buf *bytes.Buffer, indices []string) error { - if err := b.validateIndices(indices); err != nil { - return err - } - - switch len(indices) { - case 0: - buf.WriteString("{ }\n") - case 1: - buf.WriteString(`{"index": "`) - buf.WriteString(indices[0]) - buf.WriteString("\"}\n") - default: - buf.WriteString(`{"index": `) - if d, err := json.Marshal(indices); err != nil { - return err - } else { - buf.Write(d) - } - buf.WriteString("}\n") - } - - return nil -} - -func (b *Bulker) writeMsearchBody(buf *bytes.Buffer, body []byte) error { - buf.Write(body) - buf.WriteRune('\n') - - return b.validateBody(body) -} - -func (b *Bulker) validateIndex(index string) error { - // TODO: index - return nil -} - -func (b *Bulker) validateIndices(indices []string) error { - for _, i := range indices { - if err := b.validateIndex(i); err != nil { - return err - } - } - return nil -} - -func (b *Bulker) validateMeta(index, id string) error { - // TODO: validate id and index; not quotes anyhow - return nil -} - -// TODO: Fail on non-escaped line feeds -func (b *Bulker) validateBody(body []byte) error { - if !json.Valid(body) { - return es.ErrInvalidBody - } - - return nil -} - -func (b *Bulker) writeMget(buf *bytes.Buffer, index, id string) error { - if err := b.validateMeta(index, id); err != nil { - return err - } - - buf.WriteString(`{"_index":"`) - buf.WriteString(index) - buf.WriteString(`","_id":"`) - buf.WriteString(id) - buf.WriteString(`"},`) - return nil -} - -func (b *Bulker) writeBulkMeta(buf *bytes.Buffer, action Action, index, id string, opts optionsT) error { - if err := b.validateMeta(index, id); err != nil { - return err - } - - buf.WriteString(`{"`) - buf.WriteString(action.Str()) - buf.WriteString(`":{`) - if id != "" { - buf.WriteString(`"_id":"`) - buf.WriteString(id) - buf.WriteString(`",`) - } 
- if opts.RetryOnConflict > 0 { - buf.WriteString(`"retry_on_conflict":`) - buf.WriteString(strconv.Itoa(opts.RetryOnConflict)) - buf.WriteString(`,`) - } - buf.WriteString(`"_index":"`) - buf.WriteString(index) - buf.WriteString("\"}}\n") - return nil -} - -func (b *Bulker) writeBulkBody(buf *bytes.Buffer, body []byte) error { - if body == nil { - return nil - } - - buf.Write(body) - buf.WriteRune('\n') - - return b.validateBody(body) -} - -func (b *Bulker) dispatch(ctx context.Context, action Action, opts optionsT, data []byte) respT { - start := time.Now() - - ch := make(chan respT, 1) - - item := bulkT{ - 0, - action, - ch, - data, - opts, - } - - // Dispatch to bulk Run loop - select { - case b.ch <- item: - case <-ctx.Done(): - log.Error(). - Err(ctx.Err()). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch abort queue") - return respT{err: ctx.Err()} - } - - // Wait for response - select { - case resp := <-ch: - log.Trace(). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch OK") - - return resp - case <-ctx.Done(): - log.Error(). - Err(ctx.Err()). - Str("mod", kModBulk). - Str("action", action.Str()). - Bool("refresh", opts.Refresh). - Dur("rtt", time.Since(start)). - Msg("Dispatch abort response") - } - - return respT{err: ctx.Err()} -} - -type UpdateFields map[string]interface{} - -func (u UpdateFields) Marshal() ([]byte, error) { - doc := struct { - Doc map[string]interface{} `json:"doc"` - }{ - u, - } - - return json.Marshal(doc) -} diff --git a/internal/pkg/bulk/bulk_integration_test.go b/internal/pkg/bulk/bulk_integration_test.go new file mode 100644 index 000000000..5c289da7d --- /dev/null +++ b/internal/pkg/bulk/bulk_integration_test.go @@ -0,0 +1,438 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build integration + +package bulk + +import ( + "context" + "encoding/json" + "fmt" + "strconv" + "sync" + "testing" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + + "github.com/google/go-cmp/cmp" + "github.com/rs/zerolog/log" +) + +func TestBulkCreate(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + tests := []struct { + Name string + Index string + Id string + Err error + }{ + { + Name: "Empty Id", + Index: index, + }, + { + Name: "Simple Id", + Index: index, + Id: "elastic", + }, + { + Name: "Single quoted Id", + Index: index, + Id: `'singlequotes'`, + }, + { + Name: "Double quoted Id", + Index: index, + Id: `"doublequotes"`, + Err: ErrNoQuotes, + }, + { + Name: "Empty Index", + Index: "", + Err: es.ErrElastic{ + Status: 500, + Type: "string_index_out_of_bounds_exception", + }, + }, + { + Name: "Unicode Index 豆腐", + Index: string([]byte{0xe8, 0xb1, 0x86, 0xe8, 0x85, 0x90}), + }, + { + Name: "Invalid utf-8", + Index: string([]byte{0xfe, 0xfe, 0xff, 0xff}), + Err: es.ErrElastic{ + Status: 400, + Type: "json_parse_exception", + }, + }, + { + Name: "Malformed Index Uppercase", + Index: "UPPERCASE", + Err: es.ErrElastic{ + Status: 400, + Type: "invalid_index_name_exception", + }, + }, + { + Name: "Malformed Index underscore", + Index: "_nope", + Err: es.ErrElastic{ + Status: 400, + Type: "invalid_index_name_exception", + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + + sample := NewRandomSample() + sampleData := sample.marshal(t) + + // Create + id, err := bulker.Create(ctx, test.Index, test.Id, sampleData) + if !EqualElastic(test.Err, err) { + t.Fatal(err) + } + if err != nil { + return + } + + if test.Id != "" && id != test.Id { + t.Error("Expected specified id") + } else if id == "" { + t.Error("Expected non-empty id") + } + + // Read + var dst testT + dst.read(t, bulker, ctx, test.Index, id) + diff := cmp.Diff(sample, dst) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func TestBulkCreateBody(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + tests := []struct { + Name string + Body []byte + Err error + }{ + { + "Empty Body", + nil, + nil, + }, + { + "Malformed Body", + []byte("{nope}"), + es.ErrInvalidBody, + }, + { + "Overflow", + []byte(`{"overflow": 99999999999999999999}`), + es.ErrElastic{ + Status: 400, + Type: "mapper_parsing_exception", + }, + }, + { + "Invalid utf-8", + []byte{0x7b, 0x22, 0x6f, 0x6b, 0x22, 0x3a, 0x22, 0xfe, 0xfe, 0xff, 0xff, 0x22, 0x7d}, // {"ok":"${BADUTF8}"} + es.ErrElastic{ + Status: 400, + Type: "mapper_parsing_exception", + }, + }, + } + + for _, test := range tests { + t.Run(test.Name, func(t *testing.T) { + + _, err := bulker.Create(ctx, index, "", test.Body) + if !EqualElastic(test.Err, err) { + t.Fatal(err) + } + if err != nil { + return + } + }) + } +} + +func TestBulkIndex(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy, WithFlushThresholdCount(1)) + + sample := NewRandomSample() + + // Index + id, err := bulker.Index(ctx, index, "", sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Read + var dst testT + dst.read(t, bulker, ctx, index, id) + diff := cmp.Diff(sample, dst) + if diff != "" { + t.Fatal(diff) + } +} + +func 
TestBulkUpdate(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + id, err := bulker.Create(ctx, index, "", sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Update + nVal := "funkycoldmedina" + fields := UpdateFields{"kwval": nVal} + data, err := fields.Marshal() + if err != nil { + t.Fatal(err) + } + + err = bulker.Update(ctx, index, id, data, WithRefresh()) + if err != nil { + t.Fatal(err) + } + + // Read again, validate update + var dst2 testT + dst2.read(t, bulker, ctx, index, id) + + sample.KWVal = nVal + diff := cmp.Diff(sample, dst2) + if diff != "" { + t.Fatal(diff) + } +} + +func TestBulkSearch(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + _, err := bulker.Create(ctx, index, "", sample.marshal(t), WithRefresh()) + if err != nil { + t.Fatal(err) + } + + // Search + dsl := fmt.Sprintf(`{"query": { "term": {"kwval": "%s"}}}`, sample.KWVal) + + res, err := bulker.Search(ctx, index, []byte(dsl)) + + if err != nil { + t.Fatal(err) + } + + if res == nil { + t.Fatal(nil) + } + + if len(res.Hits) != 1 { + t.Fatal(fmt.Sprintf("hit mismatch: %d", len(res.Hits))) + } + + var dst3 testT + if err = json.Unmarshal(res.Hits[0].Source, &dst3); err != nil { + t.Fatal(err) + } + + diff := cmp.Diff(sample, dst3) + if diff != "" { + t.Fatal(diff) + } +} + +func TestBulkDelete(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, t, testPolicy) + + sample := NewRandomSample() + + // Create + id, err := bulker.Create(ctx, index, "", sample.marshal(t)) + if err != nil { + t.Fatal(err) + } + + // Delete + err = bulker.Delete(ctx, index, id) + if err != nil { + t.Fatal(err) + } + + data, err := bulker.Read(ctx, index, id) + if err != es.ErrElasticNotFound || data != nil { + t.Fatal(err) + } + + // Attempt to delete again, should not be found + err = bulker.Delete(ctx, index, id) + if e, ok := err.(*es.ErrElastic); !ok || e.Status != 404 { + t.Fatal(err) + } +} + +// This runs a series of CRUD operations through elastic. +// Not a particularly useful benchmark, but gives some idea of memory overhead. + +func benchmarkCreate(n int, b *testing.B) { + b.ReportAllocs() + defer (QuietLogger())() + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n)) + + var wait sync.WaitGroup + wait.Add(n) + for i := 0; i < n; i++ { + + go func() { + defer wait.Done() + + sample := NewRandomSample() + sampleData := sample.marshal(b) + + for j := 0; j < b.N; j++ { + + // Create + _, err := bulker.Create(ctx, index, "", sampleData) + if err != nil { + b.Fatal(err) + } + } + }() + } + + wait.Wait() +} + +func BenchmarkCreate(b *testing.B) { + + benchmarks := []int{1, 64, 8192, 16384, 32768, 65536} + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkCreate(n, b) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} + +// This runs a series of CRUD operations through elastic. +// Not a particularly useful benchmark, but gives some idea of memory overhead. 
+
+func benchmarkCRUD(n int, b *testing.B) {
+	b.ReportAllocs()
+	defer (QuietLogger())()
+
+	ctx, cn := context.WithCancel(context.Background())
+	defer cn()
+
+	index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n))
+
+	fieldUpdate := UpdateFields{"kwval": "funkycoldmedina"}
+	fieldData, err := fieldUpdate.Marshal()
+	if err != nil {
+		b.Fatal(err)
+	}
+
+	var wait sync.WaitGroup
+	wait.Add(n)
+	for i := 0; i < n; i++ {
+
+		go func() {
+			defer wait.Done()
+
+			sample := NewRandomSample()
+			sampleData := sample.marshal(b)
+
+			for j := 0; j < b.N; j++ {
+
+				// Create
+				id, err := bulker.Create(ctx, index, "", sampleData)
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				// Read
+				_, err = bulker.Read(ctx, index, id)
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				// Update
+				err = bulker.Update(ctx, index, id, fieldData)
+				if err != nil {
+					b.Fatal(err)
+				}
+
+				// Delete
+				err = bulker.Delete(ctx, index, id)
+				if err != nil {
+					log.Info().Str("index", index).Str("id", id).Msg("delete fail")
+					b.Fatal(err)
+				}
+			}
+		}()
+	}
+
+	wait.Wait()
+}
+
+func BenchmarkCRUD(b *testing.B) {
+
+	benchmarks := []int{1, 64, 8192, 16384, 32768, 65536}
+
+	for _, n := range benchmarks {
+
+		bindFunc := func(n int) func(b *testing.B) {
+			return func(b *testing.B) {
+				benchmarkCRUD(n, b)
+			}
+		}
+		b.Run(strconv.Itoa(n), bindFunc(n))
+	}
+}
diff --git a/internal/pkg/bulk/bulk_test.go b/internal/pkg/bulk/bulk_test.go
new file mode 100644
index 000000000..f9fe9d183
--- /dev/null
+++ b/internal/pkg/bulk/bulk_test.go
@@ -0,0 +1,382 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+package bulk
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"errors"
+	"io/ioutil"
+	"net/http"
+	"strconv"
+	"sync"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog/log"
+)
+
+// TODO:
+// WithRefresh() options
+// Delete not found?
+ +type stubTransport struct { + cb func(*http.Request) (*http.Response, error) +} + +func (s *stubTransport) Perform(req *http.Request) (*http.Response, error) { + return s.cb(req) +} + +type mockBulkTransport struct { + b *testing.B +} + +func (m *mockBulkTransport) Perform(req *http.Request) (*http.Response, error) { + + type mockFrameT struct { + Index json.RawMessage `json:"index,omitempty"` + Delete json.RawMessage `json:"delete,omitempty"` + Create json.RawMessage `json:"create,omitempty"` + Update json.RawMessage `json:"update,omitempty"` + } + + type mockEmptyT struct { + } + + mockResponse := []byte(`{"index":{"_index":"test","_type":"_doc","_id":"1","_version":1,"result":"created","_shards":{"total":2,"successful":1,"failed":0},"status":201,"_seq_no":0,"_primary_term":1}},`) + + var body bytes.Buffer + + // Write framing + body.WriteString(`{"items": [`) + + cnt := 0 + + skip := false + decoder := json.NewDecoder(req.Body) + for decoder.More() { + if skip { + skip = false + var e mockEmptyT + if err := decoder.Decode(&e); err != nil { + return nil, err + } + } else { + var frame mockFrameT + if err := decoder.Decode(&frame); err != nil { + return nil, err + } + + // Which op + switch { + case frame.Index != nil: + skip = true + case frame.Delete != nil: + case frame.Create != nil: + skip = true + case frame.Update != nil: + skip = true + default: + return nil, errors.New("Unknown op") + } + + // write mocked response + _, err := body.Write(mockResponse) + + if err != nil { + return nil, err + } + + cnt += 1 + } + } + + if cnt > 0 { + body.Truncate(body.Len() - 1) + } + + // Write trailer + body.WriteString(`], "took": 1, "errors": false}`) + + resp := &http.Response{ + Request: req, + StatusCode: 200, + Status: "200 OK", + Proto: "HTTP/1.1", + ProtoMajor: 1, + ProtoMinor: 1, + Body: ioutil.NopCloser(&body), + } + + return resp, nil +} + +// API should exit quickly if cancelled. +// Note: In the real world, the transaction may already be in flight, +// cancelling a call does not mean the transaction did not occur. 
+func TestCancelCtx(t *testing.T) { + + // create a bulker, but don't bother running it + bulker := NewBulker(nil) + + tests := []struct { + name string + test func(t *testing.T, ctx context.Context) + }{ + { + "create", + func(t *testing.T, ctx context.Context) { + id, err := bulker.Create(ctx, "testidx", "", []byte(`{"hey":"now"}`)) + + if id != "" { + t.Error("Expected empty id on context cancel:", id) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "read", + func(t *testing.T, ctx context.Context) { + data, err := bulker.Read(ctx, "testidx", "11") + + if data != nil { + t.Error("Expected empty data on context cancel:", data) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "update", + func(t *testing.T, ctx context.Context) { + err := bulker.Update(ctx, "testidx", "11", []byte(`{"now":"hey"}`)) + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "delete", + func(t *testing.T, ctx context.Context) { + err := bulker.Delete(ctx, "testidx", "11") + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "index", + func(t *testing.T, ctx context.Context) { + id, err := bulker.Index(ctx, "testidx", "", []byte(`{"hey":"now"}`)) + + if id != "" { + t.Error("Expected empty id on context cancel:", id) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "search", + func(t *testing.T, ctx context.Context) { + res, err := bulker.Search(ctx, "testidx", []byte(`{"hey":"now"}`)) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mcreate", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MCreate(ctx, []MultiOp{{Index: "testidx", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mindex", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MIndex(ctx, []MultiOp{{Index: "testidx", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mupdate", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MUpdate(ctx, []MultiOp{{Index: "testidx", Id: "umm", Body: []byte(`{"hey":"now"}`)}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + { + "mdelete", + func(t *testing.T, ctx context.Context) { + res, err := bulker.MDelete(ctx, []MultiOp{{Index: "testidx", Id: "myid"}}) + + if res != nil { + t.Error("Expected empty result on context cancel:", res) + } + + if err != context.Canceled { + t.Error("Expected context cancel err: ", err) + } + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + ctx, cancelF := context.WithCancel(context.Background()) + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + + test.test(t, ctx) + }() + + time.Sleep(time.Millisecond) + cancelF() + + wg.Wait() + }) + } +} + +func benchmarkMockBulk(b *testing.B, samples [][]byte) { + b.ReportAllocs() + defer 
(QuietLogger())() + + mock := &mockBulkTransport{} + + ctx, cancelF := context.WithCancel(context.Background()) + defer cancelF() + + bulker := NewBulker(mock) + + n := len(samples) + + var waitBulker sync.WaitGroup + waitBulker.Add(1) + go func() { + defer waitBulker.Done() + if err := bulker.Run(ctx, WithFlushThresholdCount(n)); err != context.Canceled { + b.Error(err) + } + }() + + fieldUpdate := UpdateFields{"kwval": "funkycoldmedina"} + fieldData, err := fieldUpdate.Marshal() + if err != nil { + b.Fatal(err) + } + + index := "fakeIndex" + + var wait sync.WaitGroup + wait.Add(n) + for i := 0; i < n; i++ { + + go func(sampleData []byte) { + defer wait.Done() + + for j := 0; j < b.N; j++ { + // Create + id, err := bulker.Create(ctx, index, "", sampleData) + if err != nil { + b.Error(err) + } + // Index + _, err = bulker.Index(ctx, index, id, sampleData) + if err != nil { + b.Error(err) + } + + // Update + err = bulker.Update(ctx, index, id, fieldData) + if err != nil { + b.Error(err) + } + + // Delete + err = bulker.Delete(ctx, index, id) + if err != nil { + log.Info().Str("index", index).Str("id", id).Msg("delete fail") + b.Error(err) + } + } + }(samples[i]) + } + + wait.Wait() + cancelF() + waitBulker.Wait() +} + +func BenchmarkMockBulk(b *testing.B) { + + benchmarks := []int{1, 8, 64, 4096, 32768} + + // Create the samples outside the loop to avoid accounting + max := 0 + for _, v := range benchmarks { + if max < v { + max = v + } + } + + samples := make([][]byte, 0, max) + for i := 0; i < max; i++ { + s := NewRandomSample() + samples = append(samples, s.marshal(b)) + } + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkMockBulk(b, samples[:n]) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} diff --git a/internal/pkg/bulk/engine.go b/internal/pkg/bulk/engine.go new file mode 100644 index 000000000..2c660bb36 --- /dev/null +++ b/internal/pkg/bulk/engine.go @@ -0,0 +1,429 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package bulk
+
+import (
+	"context"
+	"encoding/json"
+	"errors"
+	"strings"
+	"sync"
+	"time"
+
+	"github.com/elastic/fleet-server/v7/internal/pkg/config"
+	"github.com/elastic/fleet-server/v7/internal/pkg/es"
+
+	"github.com/elastic/go-elasticsearch/v8"
+	"github.com/elastic/go-elasticsearch/v8/esapi"
+	"github.com/rs/zerolog/log"
+	"golang.org/x/sync/semaphore"
+)
+
+var (
+	ErrNoQuotes = errors.New("quoted literal not supported")
+)
+
+type MultiOp struct {
+	Id    string
+	Index string
+	Body  []byte
+}
+
+type Bulk interface {
+
+	// Synchronous operations run in the bulk engine
+	Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error)
+	Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error)
+	Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error
+	Delete(ctx context.Context, index, id string, opts ...Opt) error
+	Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error)
+	Search(ctx context.Context, index string, body []byte, opts ...Opt) (*es.ResultT, error)
+
+	// Multi Operation APIs run in the bulk engine
+	MCreate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error)
+	MIndex(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error)
+	MUpdate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error)
+	MDelete(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error)
+
+	// Accessor used to talk to Elasticsearch directly, bypassing the bulk engine
+	Client() *elasticsearch.Client
+}
+
+const kModBulk = "bulk"
+
+type Bulker struct {
+	es esapi.Transport
+	ch chan *bulkT
+
+	blkPool sync.Pool
+}
+
+const (
+	defaultFlushInterval     = time.Second * 5
+	defaultFlushThresholdCnt = 32768
+	defaultFlushThresholdSz  = 1024 * 1024 * 10
+	defaultMaxPending        = 32
+	defaultBlockQueueSz      = 32 // Small capacity to allow multiOp to spin fast
+)
+
+func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) {
+
+	es, err := es.NewClient(ctx, cfg, false)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	// Options specified on API should override config
+	nopts := []BulkOpt{
+		WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval),
+		WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount),
+		WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize),
+		WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending),
+	}
+	nopts = append(nopts, opts...)
+
+	blk := NewBulker(es)
+	go func() {
+		err := blk.Run(ctx, nopts...)
+		log.Info().Err(err).Msg("Bulker exit")
+	}()
+
+	return es, blk, nil
+}
+
+func NewBulker(es esapi.Transport) *Bulker {
+
+	poolFunc := func() interface{} {
+		return &bulkT{ch: make(chan respT, 1)}
+	}
+
+	return &Bulker{
+		es:      es,
+		ch:      make(chan *bulkT, defaultBlockQueueSz),
+		blkPool: sync.Pool{New: poolFunc},
+	}
+}
+
+func (b *Bulker) Client() *elasticsearch.Client {
+	client, ok := b.es.(*elasticsearch.Client)
+	if !ok {
+		panic("Client is not an elasticsearch.Client pointer")
+	}
+	return client
+}
+
+func (b *Bulker) parseBulkOpts(opts ...BulkOpt) bulkOptT {
+	bopt := bulkOptT{
+		flushInterval:     defaultFlushInterval,
+		flushThresholdCnt: defaultFlushThresholdCnt,
+		flushThresholdSz:  defaultFlushThresholdSz,
+		maxPending:        defaultMaxPending,
+	}
+
+	for _, f := range opts {
+		f(&bopt)
+	}
+
+	return bopt
+}
+
+// Stop timer, but don't stall on channel.
+// API does not seem to work as specified.
+func stopTimer(t *time.Timer) {
+	if !t.Stop() {
+		select {
+		case <-t.C:
+		default:
+		}
+	}
+}
+
+func blkToQueueType(blk *bulkT) queueType {
+	queueIdx := kQueueBulk
+
+	forceRefresh := blk.flags.Has(flagRefresh)
+
+	switch blk.action {
+	case ActionSearch:
+		queueIdx = kQueueSearch
+	case ActionRead:
+		if forceRefresh {
+			queueIdx = kQueueRefreshRead
+		} else {
+			queueIdx = kQueueRead
+		}
+	default:
+		if forceRefresh {
+			queueIdx = kQueueRefreshBulk
+		}
+	}
+
+	return queueIdx
+}
+
+func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error {
+	var err error
+
+	bopts := b.parseBulkOpts(opts...)
+
+	log.Info().Interface("opts", &bopts).Msg("Run bulker with options")
+
+	// Create timer in stopped state
+	timer := time.NewTimer(bopts.flushInterval)
+	stopTimer(timer)
+	defer timer.Stop()
+
+	w := semaphore.NewWeighted(int64(bopts.maxPending))
+
+	var queues [kNumQueues]queueT
+
+	var i queueType
+	for ; i < kNumQueues; i++ {
+		queues[i].ty = i
+	}
+
+	var itemCnt int
+	var byteCnt int
+
+	doFlush := func() error {
+
+		for i := range queues {
+			q := &queues[i]
+			if q.pending > 0 {
+
+				// Pass queue structure by value
+				if err := b.flushQueue(ctx, w, *q); err != nil {
+					return err
+				}
+
+				// Reset local queue stored in array
+				q.cnt = 0
+				q.head = nil
+				q.pending = 0
+			}
+		}
+
+		// Reset threshold counters
+		itemCnt = 0
+		byteCnt = 0
+
+		return nil
+	}
+
+	for err == nil {
+
+		select {
+
+		case blk := <-b.ch:
+
+			queueIdx := blkToQueueType(blk)
+			q := &queues[queueIdx]
+
+			// Prepend block to head of target queue
+			blk.next = q.head
+			q.head = blk
+
+			// Update pending count on target queue
+			q.cnt += 1
+			q.pending += blk.buf.Len()
+
+			// Update threshold counters
+			itemCnt += 1
+			byteCnt += blk.buf.Len()
+
+			// Start timer on first queued item
+			if itemCnt == 1 {
+				timer.Reset(bopts.flushInterval)
+			}
+
+			// Threshold test, short circuit timer on pending count
+			if itemCnt >= bopts.flushThresholdCnt || byteCnt >= bopts.flushThresholdSz {
+				log.Trace().
+					Str("mod", kModBulk).
+					Int("itemCnt", itemCnt).
+					Int("byteCnt", byteCnt).
+					Msg("Flush on threshold")
+
+				err = doFlush()
+
+				stopTimer(timer)
+			}
+
+		case <-timer.C:
+			log.Trace().
+				Str("mod", kModBulk).
+				Int("itemCnt", itemCnt).
+				Int("byteCnt", byteCnt).
+				Msg("Flush on timer")
+			err = doFlush()
+
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+
+	}
+
+	return err
+}
+
+func (b *Bulker) flushQueue(ctx context.Context, w *semaphore.Weighted, queue queueT) error {
+	start := time.Now()
+	log.Trace().
+		Str("mod", kModBulk).
+		Int("cnt", queue.cnt).
+		Int("szPending", queue.pending).
+		Str("queue", queue.Type()).
+		Msg("flushQueue Wait")
+
+	if err := w.Acquire(ctx, 1); err != nil {
+		return err
+	}
+
+	log.Trace().
+		Str("mod", kModBulk).
+		Int("cnt", queue.cnt).
+		Dur("tdiff", time.Since(start)).
+		Int("szPending", queue.pending).
+		Str("queue", queue.Type()).
+		Msg("flushQueue Acquired")
+
+	go func() {
+		start := time.Now()
+
+		defer w.Release(1)
+
+		var err error
+		switch queue.ty {
+		case kQueueRead, kQueueRefreshRead:
+			err = b.flushRead(ctx, queue)
+		case kQueueSearch:
+			err = b.flushSearch(ctx, queue)
+		default:
+			err = b.flushBulk(ctx, queue)
+		}
+
+		if err != nil {
+			failQueue(queue, err)
+		}
+
+		log.Trace().
+			Err(err).
+			Str("mod", kModBulk).
+			Int("cnt", queue.cnt).
+			Int("szPending", queue.pending).
+			Str("queue", queue.Type()).
+			Dur("rtt", time.Since(start)).
+ Msg("flushQueue Done") + + }() + + return nil +} + +func failQueue(queue queueT, err error) { + for n := queue.head; n != nil; { + next := n.next // 'n' is invalid immediately on channel send + n.ch <- respT{ + err: err, + } + n = next + } +} + +func (b *Bulker) parseOpts(opts ...Opt) optionsT { + var opt optionsT + for _, o := range opts { + o(&opt) + } + return opt +} + +func (b *Bulker) newBlk(action actionT, opts optionsT) *bulkT { + blk := b.blkPool.Get().(*bulkT) + blk.action = action + if opts.Refresh { + blk.flags.Set(flagRefresh) + } + return blk +} + +func (b *Bulker) freeBlk(blk *bulkT) { + blk.reset() + b.blkPool.Put(blk) +} + +func (b *Bulker) validateIndex(index string) error { + // TODO: index + return nil +} + +func (b *Bulker) validateIndices(indices []string) error { + for _, i := range indices { + if err := b.validateIndex(i); err != nil { + return err + } + } + return nil +} + +func (b *Bulker) validateMeta(index, id string) error { + + // Quotes on id are legal, but weird. Disallow for now. + if strings.IndexByte(index, '"') != -1 || strings.IndexByte(id, '"') != -1 { + return ErrNoQuotes + } + return nil +} + +// TODO: Fail on non-escaped line feeds +func (b *Bulker) validateBody(body []byte) error { + if !json.Valid(body) { + return es.ErrInvalidBody + } + + return nil +} + +func (b *Bulker) dispatch(ctx context.Context, blk *bulkT) respT { + start := time.Now() + + // Dispatch to bulk Run loop + select { + case b.ch <- blk: + case <-ctx.Done(): + log.Error(). + Err(ctx.Err()). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). + Msg("Dispatch abort queue") + return respT{err: ctx.Err()} + } + + // Wait for response + select { + case resp := <-blk.ch: + log.Trace(). + Err(resp.err). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). + Msg("Dispatch OK") + + return resp + case <-ctx.Done(): + log.Error(). + Err(ctx.Err()). + Str("mod", kModBulk). + Str("action", blk.action.String()). + Bool("refresh", blk.flags.Has(flagRefresh)). + Dur("rtt", time.Since(start)). + Msg("Dispatch abort response") + } + + return respT{err: ctx.Err()} +} diff --git a/internal/pkg/bulk/helpers.go b/internal/pkg/bulk/helpers.go new file mode 100644 index 000000000..893275c50 --- /dev/null +++ b/internal/pkg/bulk/helpers.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "encoding/json" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/rs/zerolog/log" +) + +type UpdateFields map[string]interface{} + +func (u UpdateFields) Marshal() ([]byte, error) { + doc := struct { + Doc map[string]interface{} `json:"doc"` + }{ + u, + } + + return json.Marshal(doc) +} + +// Attempt to interpret the response as an elastic error, +// otherwise return generic elastic error. 
+func parseError(res *esapi.Response) error { + + var e struct { + Err *es.ErrorT `json:"error"` + } + + decoder := json.NewDecoder(res.Body) + + if err := decoder.Decode(&e); err != nil { + log.Error().Err(err).Msg("Cannot decode error body") + return err + } + + return es.TranslateError(res.StatusCode, e.Err) +} diff --git a/internal/pkg/bulk/multi.go b/internal/pkg/bulk/multi.go deleted file mode 100644 index 704f4ea4d..000000000 --- a/internal/pkg/bulk/multi.go +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package bulk - -import ( - "bytes" - "context" - - "github.com/rs/zerolog/log" -) - -func (b *Bulker) MUpdate(ctx context.Context, ops []BulkOp, opts ...Opt) error { - _, err := b.multiWaitBulkAction(ctx, ActionUpdate, ops) - return err -} - -func (b *Bulker) multiWaitBulkAction(ctx context.Context, action Action, ops []BulkOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { - opt := b.parseOpts(opts...) - - // Serialize requests - nops := make([]BulkOp, 0, len(ops)) - for _, op := range ops { - - // Prealloc buffer - const kSlop = 64 - var buf bytes.Buffer - buf.Grow(len(op.Body) + kSlop) - - if err := b.writeBulkMeta(&buf, action, op.Index, op.Id, opt); err != nil { - return nil, err - } - - if err := b.writeBulkBody(&buf, op.Body); err != nil { - return nil, err - } - - nops = append(nops, BulkOp{ - Id: op.Id, - Index: op.Index, - Body: buf.Bytes(), - }) - } - - // Dispatch and wait for response - resps, err := b.multiDispatch(ctx, action, opt, nops) - if err != nil { - return nil, err - } - - items := make([]BulkIndexerResponseItem, len(resps)) - for i, r := range resps { - if r.err != nil { - // TODO: well this is not great; handle this better - log.Error().Err(r.err).Msg("Fail muliDispatch") - return nil, r.err - } - items[i] = *r.data.(*BulkIndexerResponseItem) - } - - return items, nil -} - -func (b *Bulker) multiDispatch(ctx context.Context, action Action, opts optionsT, ops []BulkOp) ([]respT, error) { - var err error - - ch := make(chan respT, len(ops)) - - for i, op := range ops { - item := bulkT{ - i, - action, - ch, - op.Body, - opts, - } - - // Dispatch to bulk Run loop - select { - case b.ch <- item: - case <-ctx.Done(): - return nil, ctx.Err() - } - } - - // Wait for response - responses := make([]respT, 0, len(ops)) - -LOOP: - for len(responses) < len(ops) { - select { - case resp := <-ch: - responses = append(responses, resp) - case <-ctx.Done(): - err = ctx.Err() - responses = nil - break LOOP - } - } - - return responses, err -} diff --git a/internal/pkg/bulk/opBulk.go b/internal/pkg/bulk/opBulk.go new file mode 100644 index 000000000..cbd9dfaf9 --- /dev/null +++ b/internal/pkg/bulk/opBulk.go @@ -0,0 +1,265 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +func (b *Bulker) Create(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { + item, err := b.waitBulkAction(ctx, ActionCreate, index, id, body, opts...) 
+ if err != nil { + return "", err + } + + return item.DocumentID, nil +} + +func (b *Bulker) Index(ctx context.Context, index, id string, body []byte, opts ...Opt) (string, error) { + item, err := b.waitBulkAction(ctx, ActionIndex, index, id, body, opts...) + if err != nil { + return "", err + } + return item.DocumentID, nil +} + +func (b *Bulker) Update(ctx context.Context, index, id string, body []byte, opts ...Opt) error { + _, err := b.waitBulkAction(ctx, ActionUpdate, index, id, body, opts...) + return err +} + +func (b *Bulker) Delete(ctx context.Context, index, id string, opts ...Opt) error { + _, err := b.waitBulkAction(ctx, ActionDelete, index, id, nil, opts...) + return err +} + +func (b *Bulker) waitBulkAction(ctx context.Context, action actionT, index, id string, body []byte, opts ...Opt) (*BulkIndexerResponseItem, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) + } + + blk := b.newBlk(action, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(len(body) + kSlop) + + if err := b.writeBulkMeta(&blk.buf, action.String(), index, id, opt.RetryOnConflict); err != nil { + return nil, err + } + + if err := b.writeBulkBody(&blk.buf, action, body); err != nil { + return nil, err + } + + // Dispatch and wait for response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + r := resp.data.(*BulkIndexerResponseItem) + return r, nil +} + +func (b *Bulker) writeMget(buf *Buf, index, id string) error { + if err := b.validateMeta(index, id); err != nil { + return err + } + + buf.WriteString(`{"_index":"`) + buf.WriteString(index) + buf.WriteString(`","_id":"`) + buf.WriteString(id) + buf.WriteString(`"},`) + return nil +} + +func (b *Bulker) writeBulkMeta(buf *Buf, action, index, id, retry string) error { + if err := b.validateMeta(index, id); err != nil { + return err + } + + buf.WriteString(`{"`) + buf.WriteString(action) + buf.WriteString(`":{`) + if id != "" { + buf.WriteString(`"_id":"`) + buf.WriteString(id) + buf.WriteString(`",`) + } + if retry != "" { + buf.WriteString(`"retry_on_conflict":`) + buf.WriteString(retry) + buf.WriteString(`,`) + } + + buf.WriteString(`"_index":"`) + buf.WriteString(index) + buf.WriteString("\"}}\n") + + return nil +} + +func (b *Bulker) writeBulkBody(buf *Buf, action actionT, body []byte) error { + if len(body) == 0 { + if action == ActionDelete { + return nil + } + + // Weird to index, create, or update empty, but will allow + buf.WriteString("{}\n") + return nil + } + + if err := b.validateBody(body); err != nil { + return err + } + + buf.Write(body) + buf.WriteRune('\n') + return nil +} + +func (b *Bulker) calcBulkSz(action, idx, id, retry string, body []byte) int { + const kFraming = 19 + metaSz := kFraming + len(action) + len(idx) + + if retry != "" { + metaSz += 21 + len(retry) + } + + var idSz int + if id != "" { + const kIdFraming = 9 + idSz = kIdFraming + len(id) + } + + var bodySz int + if len(body) != 0 { + const kBodyFraming = 1 + bodySz = kBodyFraming + len(body) + } + + return metaSz + idSz + bodySz +} + +func (b *Bulker) flushBulk(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 200 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending { + bufSz = queue.pending + } + + var buf bytes.Buffer + buf.Grow(bufSz) + + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + queueCnt += 1 + } + + // Do actual bulk request; defer to the client + req := 
esapi.BulkRequest{ + Body: bytes.NewReader(buf.Bytes()), + } + + if queue.ty == kQueueRefreshBulk { + req.Refresh = "true" + } + + res, err := req.Do(ctx, b.es) + + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Fail BulkRequest req.Do") + return err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.IsError() { + log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail BulkRequest result") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error(). + Err(err). + Str("mod", kModBulk). + Msg("Response error") + return err + } + + var blk bulkIndexerResponse + blk.Items = make([]bulkStubItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error(). + Err(err). + Str("mod", kModBulk). + Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Bool("refresh", queue.ty == kQueueRefreshBulk). + Str("mod", kModBulk). + Int("took", blk.Took). + Dur("rtt", time.Since(start)). + Bool("hasErrors", blk.HasErrors). + Int("cnt", len(blk.Items)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). + Msg("flushBulk") + + if len(blk.Items) != queueCnt { + return fmt.Errorf("Bulk queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. + + n := queue.head + for i, _ := range blk.Items { + next := n.next // 'n' is invalid immediately on channel send + + item := blk.Items[i].Choose() + select { + case n.ch <- respT{ + err: item.deriveError(), + idx: n.idx, + data: item, + }: + default: + panic("Unexpected blocked response channel on flushBulk") + } + + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opMulti.go b/internal/pkg/bulk/opMulti.go new file mode 100644 index 000000000..3017c85f8 --- /dev/null +++ b/internal/pkg/bulk/opMulti.go @@ -0,0 +1,127 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + "errors" +) + +func (b *Bulker) MCreate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { + return b.multiWaitBulkOp(ctx, ActionCreate, ops) +} + +func (b *Bulker) MIndex(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { + return b.multiWaitBulkOp(ctx, ActionIndex, ops) +} + +func (b *Bulker) MUpdate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { + return b.multiWaitBulkOp(ctx, ActionUpdate, ops) +} + +func (b *Bulker) MDelete(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { + return b.multiWaitBulkOp(ctx, ActionDelete, ops) +} + +func (b *Bulker) multiWaitBulkOp(ctx context.Context, action actionT, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { + if len(ops) == 0 { + return nil, nil + } + + const kMaxBulk = (1 << 32) - 1 + + if len(ops) > kMaxBulk { + return nil, errors.New("too many bulk ops") + } + + opt := b.parseOpts(opts...) + + // Contract is that consumer never blocks, so must preallocate. + // Could consider making the response channel *respT to limit memory usage. 
+	ch := make(chan respT, len(ops))
+
+	actionStr := action.String()
+
+	// O(n) Determine how much space we need
+	var byteCnt int
+	for _, op := range ops {
+		byteCnt += b.calcBulkSz(actionStr, op.Index, op.Id, opt.RetryOnConflict, op.Body)
+	}
+
+	// Create one bulk buffer to serialize each piece.
+	// This decreases pressure on the heap. If we calculate wrong,
+	// the Buf object has the property that previously cached slices
+	// are still valid. However, underestimating the buffer size
+	// can lead to multiple copies, which undermines the optimization.
+	var bulkBuf Buf
+	bulkBuf.Grow(byteCnt)
+
+	// Serialize requests
+	bulks := make([]bulkT, len(ops))
+	for i := range ops {
+
+		bufIdx := bulkBuf.Len()
+
+		op := &ops[i]
+
+		if err := b.writeBulkMeta(&bulkBuf, actionStr, op.Index, op.Id, opt.RetryOnConflict); err != nil {
+			return nil, err
+		}
+
+		if err := b.writeBulkBody(&bulkBuf, action, op.Body); err != nil {
+			return nil, err
+		}
+
+		bodySlice := bulkBuf.Bytes()[bufIdx:]
+
+		bulk := &bulks[i]
+		bulk.ch = ch
+		bulk.idx = int32(i)
+		bulk.action = action
+		bulk.buf.Set(bodySlice)
+		if opt.Refresh {
+			bulk.flags.Set(flagRefresh)
+		}
+	}
+
+	// Dispatch requests
+	if err := b.multiDispatch(ctx, bulks); err != nil {
+		return nil, err
+	}
+
+	// Wait for response and populate return slice
+	var lastErr error
+	items := make([]BulkIndexerResponseItem, len(ops))
+
+	for i := 0; i < len(ops); i++ {
+		select {
+		case r := <-ch:
+			if r.err != nil {
+				lastErr = r.err
+			}
+			if r.data != nil {
+				items[r.idx] = *r.data.(*BulkIndexerResponseItem)
+			}
+		case <-ctx.Done():
+			return nil, ctx.Err()
+		}
+	}
+
+	return items, lastErr
+}
+
+func (b *Bulker) multiDispatch(ctx context.Context, blks []bulkT) error {
+
+	// Dispatch to bulk Run loop; Iterate by reference.
+	for i := range blks {
+		select {
+		case b.ch <- &blks[i]:
+		case <-ctx.Done():
+			return ctx.Err()
+		}
+	}
+
+	return nil
+}
diff --git a/internal/pkg/bulk/opMulti_integration_test.go b/internal/pkg/bulk/opMulti_integration_test.go
new file mode 100644
index 000000000..16df21bcd
--- /dev/null
+++ b/internal/pkg/bulk/opMulti_integration_test.go
@@ -0,0 +1,85 @@
+// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one
+// or more contributor license agreements. Licensed under the Elastic License;
+// you may not use this file except in compliance with the Elastic License.
+
+// +build integration
+
+package bulk
+
+import (
+	"context"
+	"strconv"
+	"testing"
+	"time"
+
+	"github.com/rs/zerolog"
+)
+
+// This runs a series of CRUD operations through elastic.
+// Not a particularly useful benchmark, but gives some idea of memory overhead.
+ +func benchmarkMultiUpdate(n int, b *testing.B) { + b.ReportAllocs() + defer (QuietLogger())() + + l := zerolog.GlobalLevel() + defer zerolog.SetGlobalLevel(l) + + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := SetupIndexWithBulk(ctx, b, testPolicy, WithFlushThresholdCount(n), WithFlushInterval(time.Millisecond*10)) + + // Create N samples + var ops []MultiOp + for i := 0; i < n; i++ { + sample := NewRandomSample() + ops = append(ops, MultiOp{ + Index: index, + Body: sample.marshal(b), + }) + } + + items, err := bulker.MCreate(ctx, ops) + if err != nil { + b.Fatal(err) + } + + for j := 0; j < b.N; j++ { + fields := UpdateFields{ + "dateval": time.Now().Format(time.RFC3339), + } + + body, err := fields.Marshal() + if err != nil { + b.Fatal(err) + } + + for i := range ops { + ops[i].Id = items[i].DocumentID + ops[i].Body = body + } + + _, err = bulker.MUpdate(ctx, ops) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkMultiUpdate(b *testing.B) { + + benchmarks := []int{1, 64, 8192, 37268, 131072} + + for _, n := range benchmarks { + + bindFunc := func(n int) func(b *testing.B) { + return func(b *testing.B) { + benchmarkMultiUpdate(n, b) + } + } + b.Run(strconv.Itoa(n), bindFunc(n)) + } +} diff --git a/internal/pkg/bulk/opMulti_test.go b/internal/pkg/bulk/opMulti_test.go new file mode 100644 index 000000000..26fca3252 --- /dev/null +++ b/internal/pkg/bulk/opMulti_test.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + "strconv" + "testing" +) + +const payload = `{"_id" : "1", "_index" : "test"}` + +// Test throughput of creating multiOps +func BenchmarkMultiUpdateMock(b *testing.B) { + defer (QuietLogger())() + + // Allocate, but don't run. Stub the client. + bulker := NewBulker(nil) + defer close(bulker.ch) + + go func() { + for v := range bulker.ch { + v.ch <- respT{nil, v.idx, nil} + } + }() + + body := []byte(payload) + + benchmarks := []int{1, 8, 64, 4096, 32768, 131072} + + // Create the samples outside the loop to avoid accounting + max := 0 + for _, v := range benchmarks { + if max < v { + max = v + } + } + + // Create the ops + ops := make([]MultiOp, 0, max) + for i := 0; i < max; i++ { + ops = append(ops, MultiOp{ + Id: "abba", + Index: "bogus", + Body: body, + }) + } + + for _, n := range benchmarks { + b.Run(strconv.Itoa(n), func(b *testing.B) { + b.ReportAllocs() + ctx := context.Background() + for i := 0; i < b.N; i++ { + bulker.MUpdate(ctx, ops[:n]) + } + }) + } + +} diff --git a/internal/pkg/bulk/opRead.go b/internal/pkg/bulk/opRead.go new file mode 100644 index 000000000..3a8d04fc7 --- /dev/null +++ b/internal/pkg/bulk/opRead.go @@ -0,0 +1,155 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +import ( + "bytes" + "context" + "fmt" + "time" + + "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +const ( + rPrefix = "{\"docs\": [" + rSuffix = "]}" +) + +func (b *Bulker) Read(ctx context.Context, index, id string, opts ...Opt) ([]byte, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) + } + + blk := b.newBlk(ActionRead, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(kSlop) + + if err := b.writeMget(&blk.buf, index, id); err != nil { + return nil, err + } + + // Process response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + // Interpret response, looking for generated id + r := resp.data.(*MgetResponseItem) + return r.Source, nil +} + +func (b *Bulker) flushRead(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 256 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending+len(rSuffix) { + bufSz = queue.pending + len(rSuffix) + } + + buf := bytes.NewBufferString(rPrefix) + buf.Grow(bufSz) + + // Each item a JSON array element followed by comma + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + queueCnt += 1 + } + + // Need to strip the last element and append the suffix + payload := buf.Bytes() + payload = append(payload[:len(payload)-1], []byte(rSuffix)...) + + // Do actual bulk request; and send response on chan + req := esapi.MgetRequest{ + Body: bytes.NewReader(payload), + } + + var refresh bool + if queue.ty == kQueueRefreshRead { + refresh = true + req.Refresh = &refresh + } + + res, err := req.Do(ctx, b.es) + + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Fail MgetRequest req.Do") + return err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.IsError() { + log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail MgetRequest result") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Response error") + } + + // prealloc slice + var blk MgetResponse + blk.Items = make([]MgetResponseItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Bool("refresh", refresh). + Str("mod", kModBulk). + Dur("rtt", time.Since(start)). + Int("cnt", len(blk.Items)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). + Msg("flushRead") + + if len(blk.Items) != queueCnt { + return fmt.Errorf("Mget queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. + + n := queue.head + for i := range blk.Items { + next := n.next // 'n' is invalid immediately on channel send + item := &blk.Items[i] + select { + case n.ch <- respT{ + err: item.deriveError(), + idx: n.idx, + data: item, + }: + default: + panic("Unexpected blocked response channel on flushRead") + } + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opSearch.go b/internal/pkg/bulk/opSearch.go new file mode 100644 index 000000000..c8d4f1b17 --- /dev/null +++ b/internal/pkg/bulk/opSearch.go @@ -0,0 +1,185 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. 
under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/mailru/easyjson" + "github.com/rs/zerolog/log" +) + +func (b *Bulker) Search(ctx context.Context, index string, body []byte, opts ...Opt) (*es.ResultT, error) { + var opt optionsT + if len(opts) > 0 { + opt = b.parseOpts(opts...) + } + + blk := b.newBlk(ActionSearch, opt) + + // Serialize request + const kSlop = 64 + blk.buf.Grow(len(body) + kSlop) + + if err := b.writeMsearchMeta(&blk.buf, index, opt.Indices); err != nil { + return nil, err + } + + if err := b.writeMsearchBody(&blk.buf, body); err != nil { + return nil, err + } + + // Process response + resp := b.dispatch(ctx, blk) + if resp.err != nil { + return nil, resp.err + } + b.freeBlk(blk) + + // Interpret response + r := resp.data.(*MsearchResponseItem) + return &es.ResultT{HitsT: r.Hits, Aggregations: r.Aggregations}, nil +} + +func (b *Bulker) writeMsearchMeta(buf *Buf, index string, moreIndices []string) error { + if err := b.validateIndex(index); err != nil { + return err + } + + if len(moreIndices) > 0 { + if err := b.validateIndices(moreIndices); err != nil { + return err + } + + indices := []string{index} + indices = append(indices, moreIndices...) + + buf.WriteString(`{"index": `) + if d, err := json.Marshal(indices); err != nil { + return err + } else { + buf.Write(d) + } + buf.WriteString("}\n") + } else if len(index) == 0 { + buf.WriteString("{ }\n") + } else { + buf.WriteString(`{"index": "`) + buf.WriteString(index) + buf.WriteString("\"}\n") + } + + return nil +} + +func (b *Bulker) writeMsearchBody(buf *Buf, body []byte) error { + buf.Write(body) + buf.WriteRune('\n') + + return b.validateBody(body) +} + +func (b *Bulker) flushSearch(ctx context.Context, queue queueT) error { + start := time.Now() + + const kRoughEstimatePerItem = 256 + + bufSz := queue.cnt * kRoughEstimatePerItem + if bufSz < queue.pending { + bufSz = queue.pending + } + + var buf bytes.Buffer + buf.Grow(bufSz) + + queueCnt := 0 + for n := queue.head; n != nil; n = n.next { + buf.Write(n.buf.Bytes()) + + queueCnt += 1 + } + + // Do actual bulk request; and send response on chan + req := esapi.MsearchRequest{ + Body: bytes.NewReader(buf.Bytes()), + } + res, err := req.Do(ctx, b.es) + + if err != nil { + return err + } + + if res.Body != nil { + defer res.Body.Close() + } + + if res.IsError() { + log.Error().Err(err).Str("mod", kModBulk).Msg("Fail writeMsearchBody") + return parseError(res) + } + + // Reuse buffer + buf.Reset() + + bodySz, err := buf.ReadFrom(res.Body) + if err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("MsearchResponse error") + return err + } + + // prealloc slice + var blk MsearchResponse + blk.Responses = make([]MsearchResponseItem, 0, queueCnt) + + if err = easyjson.Unmarshal(buf.Bytes(), &blk); err != nil { + log.Error().Err(err).Str("mod", kModBulk).Msg("Unmarshal error") + return err + } + + log.Trace(). + Err(err). + Str("mod", kModBulk). + Dur("rtt", time.Since(start)). + Int("took", blk.Took). + Int("cnt", len(blk.Responses)). + Int("bufSz", bufSz). + Int64("bodySz", bodySz). 
+ Msg("flushSearch") + + if len(blk.Responses) != queueCnt { + return fmt.Errorf("Bulk queue length mismatch") + } + + // WARNING: Once we start pushing items to + // the queue, the node pointers are invalid. + // Do NOT return a non-nil value or failQueue + // up the stack will fail. + + n := queue.head + for i := range blk.Responses { + next := n.next // 'n' is invalid immediately on channel send + + response := &blk.Responses[i] + + select { + case n.ch <- respT{ + err: response.deriveError(), + idx: n.idx, + data: response, + }: + default: + panic("Unexpected blocked response channel on flushSearch") + } + n = next + } + + return nil +} diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index c3aa367d7..f1925390a 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -5,6 +5,8 @@ package bulk import ( + "github.com/rs/zerolog" + "strconv" "time" ) @@ -13,7 +15,8 @@ import ( type optionsT struct { Refresh bool - RetryOnConflict int + RetryOnConflict string + Indices []string } type Opt func(*optionsT) @@ -26,7 +29,14 @@ func WithRefresh() Opt { func WithRetryOnConflict(n int) Opt { return func(opt *optionsT) { - opt.RetryOnConflict = n + opt.RetryOnConflict = strconv.Itoa(n) + } +} + +// Applicable to search +func WithIndex(idx string) Opt { + return func(opt *optionsT) { + opt.Indices = append(opt.Indices, idx) } } @@ -38,7 +48,6 @@ type bulkOptT struct { flushThresholdCnt int flushThresholdSz int maxPending int - queuePrealloc int } type BulkOpt func(*bulkOptT) @@ -57,7 +66,7 @@ func WithFlushThresholdCount(cnt int) BulkOpt { } } -// Cummulative size of pending transactions that will force flush before interval +// Cummulative size in bytes of pending transactions that will force flush before interval func WithFlushThresholdSize(sz int) BulkOpt { return func(opt *bulkOptT) { opt.flushThresholdSz = sz @@ -70,3 +79,10 @@ func WithMaxPending(max int) BulkOpt { opt.maxPending = max } } + +func (o *bulkOptT) MarshalZerologObject(e *zerolog.Event) { + e.Dur("flushInterval", o.flushInterval) + e.Int("flushThresholdCnt", o.flushThresholdCnt) + e.Int("flushThresholdSz", o.flushThresholdSz) + e.Int("maxPending", o.maxPending) +} diff --git a/internal/pkg/bulk/queue.go b/internal/pkg/bulk/queue.go new file mode 100644 index 000000000..60f7b0bee --- /dev/null +++ b/internal/pkg/bulk/queue.go @@ -0,0 +1,39 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package bulk + +type queueT struct { + ty queueType + cnt int + head *bulkT + pending int +} + +type queueType int + +const ( + kQueueBulk queueType = iota + kQueueRead + kQueueSearch + kQueueRefreshBulk + kQueueRefreshRead + kNumQueues +) + +func (q queueT) Type() string { + switch q.ty { + case kQueueBulk: + return "bulk" + case kQueueRead: + return "read" + case kQueueSearch: + return "search" + case kQueueRefreshBulk: + return "refreshBulk" + case kQueueRefreshRead: + return "refreshRead" + } + panic("unknown") +} diff --git a/internal/pkg/bulk/schema.go b/internal/pkg/bulk/schema.go index bd6df93dd..fe68c9717 100644 --- a/internal/pkg/bulk/schema.go +++ b/internal/pkg/bulk/schema.go @@ -6,13 +6,37 @@ package bulk import ( "encoding/json" + "errors" "github.com/elastic/fleet-server/v7/internal/pkg/es" ) -type BulkIndexerResponse struct { - Took int `json:"took"` - HasErrors bool `json:"errors"` - Items []map[string]BulkIndexerResponseItem `json:"items,omitempty"` +type bulkStubItem struct { + Index *BulkIndexerResponseItem `json:"index"` + Delete *BulkIndexerResponseItem `json:"delete"` + Create *BulkIndexerResponseItem `json:"create"` + Update *BulkIndexerResponseItem `json:"update"` +} + +func (bi bulkStubItem) Choose() *BulkIndexerResponseItem { + switch { + case bi.Update != nil: + return bi.Update + case bi.Create != nil: + return bi.Create + case bi.Index != nil: + return bi.Index + case bi.Delete != nil: + return bi.Delete + } + + return nil +} + +//easyjson:json +type bulkIndexerResponse struct { + Took int `json:"took"` + HasErrors bool `json:"errors"` + Items []bulkStubItem `json:"items,omitempty"` } // Comment out fields we don't use; no point decoding. @@ -31,9 +55,18 @@ type BulkIndexerResponseItem struct { // Failed int `json:"failed"` // } `json:"_shards"` - Error es.ErrorT `json:"error,omitempty"` + Error *es.ErrorT `json:"error,omitempty"` } +func (b *BulkIndexerResponseItem) deriveError() error { + if b == nil { + return errors.New("Unknown bulk operator") + } + + return es.TranslateError(b.Status, b.Error) +} + +//easyjson:json type MgetResponse struct { Items []MgetResponseItem `json:"docs"` } @@ -72,18 +105,15 @@ type MsearchResponseItem struct { Hits es.HitsT `json:"hits"` Aggregations map[string]es.Aggregation `json:"aggregations,omitempty"` - Error es.ErrorT `json:"error,omitempty"` + Error *es.ErrorT `json:"error,omitempty"` } +//easyjson:json type MsearchResponse struct { Responses []MsearchResponseItem `json:"responses"` Took int `json:"took"` } -func (b *BulkIndexerResponseItem) deriveError() error { - return es.TranslateError(b.Status, b.Error) -} - func (b *MsearchResponseItem) deriveError() error { return es.TranslateError(b.Status, b.Error) } diff --git a/internal/pkg/bulk/schema_easyjson.go b/internal/pkg/bulk/schema_easyjson.go new file mode 100644 index 000000000..7b1dd1bd2 --- /dev/null +++ b/internal/pkg/bulk/schema_easyjson.go @@ -0,0 +1,1261 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by easyjson for marshaling/unmarshaling. DO NOT EDIT. 
+ +package bulk + +import ( + json "encoding/json" + es "github.com/elastic/fleet-server/v7/internal/pkg/es" + easyjson "github.com/mailru/easyjson" + jlexer "github.com/mailru/easyjson/jlexer" + jwriter "github.com/mailru/easyjson/jwriter" +) + +// suppress unused package warning +var ( + _ *json.RawMessage + _ *jlexer.Lexer + _ *jwriter.Writer + _ easyjson.Marshaler +) + +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(in *jlexer.Lexer, out *bulkIndexerResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "took": + out.Took = int(in.Int()) + case "errors": + out.HasErrors = bool(in.Bool()) + case "items": + if in.IsNull() { + in.Skip() + out.Items = nil + } else { + in.Delim('[') + if out.Items == nil { + if !in.IsDelim(']') { + out.Items = make([]bulkStubItem, 0, 2) + } else { + out.Items = []bulkStubItem{} + } + } else { + out.Items = (out.Items)[:0] + } + for !in.IsDelim(']') { + var v1 bulkStubItem + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk1(in, &v1) + out.Items = append(out.Items, v1) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(out *jwriter.Writer, in bulkIndexerResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"took\":" + out.RawString(prefix[1:]) + out.Int(int(in.Took)) + } + { + const prefix string = ",\"errors\":" + out.RawString(prefix) + out.Bool(bool(in.HasErrors)) + } + if len(in.Items) != 0 { + const prefix string = ",\"items\":" + out.RawString(prefix) + { + out.RawByte('[') + for v2, v3 := range in.Items { + if v2 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk1(out, v3) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v bulkIndexerResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v bulkIndexerResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *bulkIndexerResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *bulkIndexerResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk1(in *jlexer.Lexer, out *bulkStubItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "index": + if in.IsNull() { + in.Skip() + out.Index = nil + } else { + if 
out.Index == nil { + out.Index = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Index) + } + case "delete": + if in.IsNull() { + in.Skip() + out.Delete = nil + } else { + if out.Delete == nil { + out.Delete = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Delete) + } + case "create": + if in.IsNull() { + in.Skip() + out.Create = nil + } else { + if out.Create == nil { + out.Create = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Create) + } + case "update": + if in.IsNull() { + in.Skip() + out.Update = nil + } else { + if out.Update == nil { + out.Update = new(BulkIndexerResponseItem) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in, out.Update) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk1(out *jwriter.Writer, in bulkStubItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"index\":" + out.RawString(prefix[1:]) + if in.Index == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Index) + } + } + { + const prefix string = ",\"delete\":" + out.RawString(prefix) + if in.Delete == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Delete) + } + } + { + const prefix string = ",\"create\":" + out.RawString(prefix) + if in.Create == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Create) + } + } + { + const prefix string = ",\"update\":" + out.RawString(prefix) + if in.Update == nil { + out.RawString("null") + } else { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out, *in.Update) + } + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk2(in *jlexer.Lexer, out *BulkIndexerResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "_id": + out.DocumentID = string(in.String()) + case "status": + out.Status = int(in.Int()) + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(es.ErrorT) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in, out.Error) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk2(out *jwriter.Writer, in BulkIndexerResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"_id\":" + out.RawString(prefix[1:]) + out.String(string(in.DocumentID)) + } + { + const prefix string = ",\"status\":" + out.RawString(prefix) + out.Int(int(in.Status)) + } + if in.Error != nil { + const prefix string = ",\"error\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out, *in.Error) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in 
*jlexer.Lexer, out *es.ErrorT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "reason": + out.Reason = string(in.String()) + case "caused_by": + easyjsonCef4e921Decode(in, &out.Cause) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out *jwriter.Writer, in es.ErrorT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"reason\":" + out.RawString(prefix) + out.String(string(in.Reason)) + } + { + const prefix string = ",\"caused_by\":" + out.RawString(prefix) + easyjsonCef4e921Encode(out, in.Cause) + } + out.RawByte('}') +} +func easyjsonCef4e921Decode(in *jlexer.Lexer, out *struct { + Type string `json:"type"` + Reason string `json:"reason"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "type": + out.Type = string(in.String()) + case "reason": + out.Reason = string(in.String()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode(out *jwriter.Writer, in struct { + Type string `json:"type"` + Reason string `json:"reason"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"type\":" + out.RawString(prefix[1:]) + out.String(string(in.Type)) + } + { + const prefix string = ",\"reason\":" + out.RawString(prefix) + out.String(string(in.Reason)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(in *jlexer.Lexer, out *MsearchResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "responses": + if in.IsNull() { + in.Skip() + out.Responses = nil + } else { + in.Delim('[') + if out.Responses == nil { + if !in.IsDelim(']') { + out.Responses = make([]MsearchResponseItem, 0, 0) + } else { + out.Responses = []MsearchResponseItem{} + } + } else { + out.Responses = (out.Responses)[:0] + } + for !in.IsDelim(']') { + var v4 MsearchResponseItem + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk4(in, &v4) + out.Responses = append(out.Responses, v4) + in.WantComma() + } + in.Delim(']') + } + case "took": + out.Took = int(in.Int()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(out *jwriter.Writer, in MsearchResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"responses\":" + out.RawString(prefix[1:]) + if in.Responses == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + 
out.RawByte('[') + for v5, v6 := range in.Responses { + if v5 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk4(out, v6) + } + out.RawByte(']') + } + } + { + const prefix string = ",\"took\":" + out.RawString(prefix) + out.Int(int(in.Took)) + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v MsearchResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v MsearchResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk3(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *MsearchResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *MsearchResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk3(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk4(in *jlexer.Lexer, out *MsearchResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "status": + out.Status = int(in.Int()) + case "took": + out.Took = uint64(in.Uint64()) + case "timed_out": + out.TimedOut = bool(in.Bool()) + case "_shards": + easyjsonCef4e921Decode1(in, &out.Shards) + case "hits": + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs1(in, &out.Hits) + case "aggregations": + if in.IsNull() { + in.Skip() + } else { + in.Delim('{') + if !in.IsDelim('}') { + out.Aggregations = make(map[string]es.Aggregation) + } else { + out.Aggregations = nil + } + for !in.IsDelim('}') { + key := string(in.String()) + in.WantColon() + var v7 es.Aggregation + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs2(in, &v7) + (out.Aggregations)[key] = v7 + in.WantComma() + } + in.Delim('}') + } + case "error": + if in.IsNull() { + in.Skip() + out.Error = nil + } else { + if out.Error == nil { + out.Error = new(es.ErrorT) + } + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs(in, out.Error) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk4(out *jwriter.Writer, in MsearchResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"status\":" + out.RawString(prefix[1:]) + out.Int(int(in.Status)) + } + { + const prefix string = ",\"took\":" + out.RawString(prefix) + out.Uint64(uint64(in.Took)) + } + { + const prefix string = ",\"timed_out\":" + out.RawString(prefix) + out.Bool(bool(in.TimedOut)) + } + { + const prefix string = ",\"_shards\":" + out.RawString(prefix) + easyjsonCef4e921Encode1(out, in.Shards) + } + { + const prefix string = ",\"hits\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs1(out, in.Hits) + } + if len(in.Aggregations) != 0 { + const prefix string = ",\"aggregations\":" + out.RawString(prefix) + { 
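+			// Encode the aggregations map as a JSON object: each key is the aggregation name, each value the corresponding es.Aggregation.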
+ out.RawByte('{') + v8First := true + for v8Name, v8Value := range in.Aggregations { + if v8First { + v8First = false + } else { + out.RawByte(',') + } + out.String(string(v8Name)) + out.RawByte(':') + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs2(out, v8Value) + } + out.RawByte('}') + } + } + if in.Error != nil { + const prefix string = ",\"error\":" + out.RawString(prefix) + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs(out, *in.Error) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs2(in *jlexer.Lexer, out *es.Aggregation) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "value": + out.Value = float64(in.Float64()) + case "doc_count_error_upper_bound": + out.DocCountErrorUpperBound = int64(in.Int64()) + case "sum_other_doc_count": + out.SumOtherDocCount = int64(in.Int64()) + case "buckets": + if in.IsNull() { + in.Skip() + out.Buckets = nil + } else { + in.Delim('[') + if out.Buckets == nil { + if !in.IsDelim(']') { + out.Buckets = make([]es.Bucket, 0, 2) + } else { + out.Buckets = []es.Bucket{} + } + } else { + out.Buckets = (out.Buckets)[:0] + } + for !in.IsDelim(']') { + var v9 es.Bucket + if data := in.Raw(); in.Ok() { + in.AddError((v9).UnmarshalJSON(data)) + } + out.Buckets = append(out.Buckets, v9) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs2(out *jwriter.Writer, in es.Aggregation) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"value\":" + out.RawString(prefix[1:]) + out.Float64(float64(in.Value)) + } + { + const prefix string = ",\"doc_count_error_upper_bound\":" + out.RawString(prefix) + out.Int64(int64(in.DocCountErrorUpperBound)) + } + { + const prefix string = ",\"sum_other_doc_count\":" + out.RawString(prefix) + out.Int64(int64(in.SumOtherDocCount)) + } + if len(in.Buckets) != 0 { + const prefix string = ",\"buckets\":" + out.RawString(prefix) + { + out.RawByte('[') + for v10, v11 := range in.Buckets { + if v10 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs3(out, v11) + } + out.RawByte(']') + } + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs3(in *jlexer.Lexer, out *es.Bucket) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "key": + out.Key = string(in.String()) + case "doc_count": + out.DocCount = int64(in.Int64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs3(out *jwriter.Writer, in es.Bucket) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"key\":" + out.RawString(prefix[1:]) + out.String(string(in.Key)) + } + { + const prefix string = ",\"doc_count\":" + out.RawString(prefix) + out.Int64(int64(in.DocCount)) + } + out.RawByte('}') +} 
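+// Decode an es.HitsT envelope: the "hits" array of es.HitT, the "total" relation/value object, and the nullable "max_score".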
+func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs1(in *jlexer.Lexer, out *es.HitsT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "hits": + if in.IsNull() { + in.Skip() + out.Hits = nil + } else { + in.Delim('[') + if out.Hits == nil { + if !in.IsDelim(']') { + out.Hits = make([]es.HitT, 0, 0) + } else { + out.Hits = []es.HitT{} + } + } else { + out.Hits = (out.Hits)[:0] + } + for !in.IsDelim(']') { + var v12 es.HitT + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs4(in, &v12) + out.Hits = append(out.Hits, v12) + in.WantComma() + } + in.Delim(']') + } + case "total": + easyjsonCef4e921Decode2(in, &out.Total) + case "max_score": + if in.IsNull() { + in.Skip() + out.MaxScore = nil + } else { + if out.MaxScore == nil { + out.MaxScore = new(float64) + } + *out.MaxScore = float64(in.Float64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs1(out *jwriter.Writer, in es.HitsT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"hits\":" + out.RawString(prefix[1:]) + if in.Hits == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v13, v14 := range in.Hits { + if v13 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs4(out, v14) + } + out.RawByte(']') + } + } + { + const prefix string = ",\"total\":" + out.RawString(prefix) + easyjsonCef4e921Encode2(out, in.Total) + } + { + const prefix string = ",\"max_score\":" + out.RawString(prefix) + if in.MaxScore == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.MaxScore)) + } + } + out.RawByte('}') +} +func easyjsonCef4e921Decode2(in *jlexer.Lexer, out *struct { + Relation string `json:"relation"` + Value uint64 `json:"value"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "relation": + out.Relation = string(in.String()) + case "value": + out.Value = uint64(in.Uint64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode2(out *jwriter.Writer, in struct { + Relation string `json:"relation"` + Value uint64 `json:"value"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"relation\":" + out.RawString(prefix[1:]) + out.String(string(in.Relation)) + } + { + const prefix string = ",\"value\":" + out.RawString(prefix) + out.Uint64(uint64(in.Value)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgEs4(in *jlexer.Lexer, out *es.HitT) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "_id": + out.Id = string(in.String()) + case "_seq_no": + out.SeqNo = 
int64(in.Int64()) + case "version": + out.Version = int64(in.Int64()) + case "_index": + out.Index = string(in.String()) + case "_source": + if data := in.Raw(); in.Ok() { + in.AddError((out.Source).UnmarshalJSON(data)) + } + case "_score": + if in.IsNull() { + in.Skip() + out.Score = nil + } else { + if out.Score == nil { + out.Score = new(float64) + } + *out.Score = float64(in.Float64()) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgEs4(out *jwriter.Writer, in es.HitT) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"_id\":" + out.RawString(prefix[1:]) + out.String(string(in.Id)) + } + { + const prefix string = ",\"_seq_no\":" + out.RawString(prefix) + out.Int64(int64(in.SeqNo)) + } + { + const prefix string = ",\"version\":" + out.RawString(prefix) + out.Int64(int64(in.Version)) + } + { + const prefix string = ",\"_index\":" + out.RawString(prefix) + out.String(string(in.Index)) + } + { + const prefix string = ",\"_source\":" + out.RawString(prefix) + out.Raw((in.Source).MarshalJSON()) + } + { + const prefix string = ",\"_score\":" + out.RawString(prefix) + if in.Score == nil { + out.RawString("null") + } else { + out.Float64(float64(*in.Score)) + } + } + out.RawByte('}') +} +func easyjsonCef4e921Decode1(in *jlexer.Lexer, out *struct { + Total uint64 `json:"total"` + Successful uint64 `json:"successful"` + Skipped uint64 `json:"skipped"` + Failed uint64 `json:"failed"` +}) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "total": + out.Total = uint64(in.Uint64()) + case "successful": + out.Successful = uint64(in.Uint64()) + case "skipped": + out.Skipped = uint64(in.Uint64()) + case "failed": + out.Failed = uint64(in.Uint64()) + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921Encode1(out *jwriter.Writer, in struct { + Total uint64 `json:"total"` + Successful uint64 `json:"successful"` + Skipped uint64 `json:"skipped"` + Failed uint64 `json:"failed"` +}) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"total\":" + out.RawString(prefix[1:]) + out.Uint64(uint64(in.Total)) + } + { + const prefix string = ",\"successful\":" + out.RawString(prefix) + out.Uint64(uint64(in.Successful)) + } + { + const prefix string = ",\"skipped\":" + out.RawString(prefix) + out.Uint64(uint64(in.Skipped)) + } + { + const prefix string = ",\"failed\":" + out.RawString(prefix) + out.Uint64(uint64(in.Failed)) + } + out.RawByte('}') +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(in *jlexer.Lexer, out *MgetResponse) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "docs": + if in.IsNull() { + in.Skip() + out.Items = nil + } else { + in.Delim('[') + if out.Items == nil { + if !in.IsDelim(']') { + out.Items = make([]MgetResponseItem, 0, 2) + } else { + out.Items = []MgetResponseItem{} + } + } else { + out.Items = (out.Items)[:0] + 
} + for !in.IsDelim(']') { + var v15 MgetResponseItem + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk6(in, &v15) + out.Items = append(out.Items, v15) + in.WantComma() + } + in.Delim(']') + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(out *jwriter.Writer, in MgetResponse) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"docs\":" + out.RawString(prefix[1:]) + if in.Items == nil && (out.Flags&jwriter.NilSliceAsEmpty) == 0 { + out.RawString("null") + } else { + out.RawByte('[') + for v16, v17 := range in.Items { + if v16 > 0 { + out.RawByte(',') + } + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk6(out, v17) + } + out.RawByte(']') + } + } + out.RawByte('}') +} + +// MarshalJSON supports json.Marshaler interface +func (v MgetResponse) MarshalJSON() ([]byte, error) { + w := jwriter.Writer{} + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(&w, v) + return w.Buffer.BuildBytes(), w.Error +} + +// MarshalEasyJSON supports easyjson.Marshaler interface +func (v MgetResponse) MarshalEasyJSON(w *jwriter.Writer) { + easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk5(w, v) +} + +// UnmarshalJSON supports json.Unmarshaler interface +func (v *MgetResponse) UnmarshalJSON(data []byte) error { + r := jlexer.Lexer{Data: data} + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(&r, v) + return r.Error() +} + +// UnmarshalEasyJSON supports easyjson.Unmarshaler interface +func (v *MgetResponse) UnmarshalEasyJSON(l *jlexer.Lexer) { + easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk5(l, v) +} +func easyjsonCef4e921DecodeGithubComElasticFleetServerV7InternalPkgBulk6(in *jlexer.Lexer, out *MgetResponseItem) { + isTopLevel := in.IsStart() + if in.IsNull() { + if isTopLevel { + in.Consumed() + } + in.Skip() + return + } + in.Delim('{') + for !in.IsDelim('}') { + key := in.UnsafeFieldName(false) + in.WantColon() + if in.IsNull() { + in.Skip() + in.WantComma() + continue + } + switch key { + case "found": + out.Found = bool(in.Bool()) + case "_source": + if data := in.Raw(); in.Ok() { + in.AddError((out.Source).UnmarshalJSON(data)) + } + default: + in.SkipRecursive() + } + in.WantComma() + } + in.Delim('}') + if isTopLevel { + in.Consumed() + } +} +func easyjsonCef4e921EncodeGithubComElasticFleetServerV7InternalPkgBulk6(out *jwriter.Writer, in MgetResponseItem) { + out.RawByte('{') + first := true + _ = first + { + const prefix string = ",\"found\":" + out.RawString(prefix[1:]) + out.Bool(bool(in.Found)) + } + { + const prefix string = ",\"_source\":" + out.RawString(prefix) + out.Raw((in.Source).MarshalJSON()) + } + out.RawByte('}') +} diff --git a/internal/pkg/bulk/setup_test.go b/internal/pkg/bulk/setup_test.go new file mode 100644 index 000000000..3c0f4504b --- /dev/null +++ b/internal/pkg/bulk/setup_test.go @@ -0,0 +1,168 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
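+//
+// Shared test scaffolding for the bulk package: a default Elasticsearch config, random sample documents, and helpers that bootstrap a bulker and a uniquely named test index.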
+ +package bulk + +import ( + "context" + "encoding/base64" + "encoding/json" + "math/rand" + "testing" + "time" + + "github.com/elastic/go-ucfg/yaml" + "github.com/rs/xid" + + "github.com/Pallinder/go-randomdata" + "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" + "github.com/rs/zerolog" +) + +var defaultCfg config.Config +var defaultCfgData = []byte(` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +`) + +const testPolicy = `{ + "properties": { + "intval": { + "type": "integer" + }, + "objval": { + "type": "object" + }, + "boolval": { + "type": "boolean" + }, + "kwval": { + "type": "keyword" + }, + "binaryval": { + "type": "binary" + }, + "dateval": { + "type": "date" + } + } +}` + +type subT struct { + SubString string `json:"substring"` +} + +type testT struct { + IntVal int `json:"intval"` + ObjVal subT `json:"objval"` + BoolVal bool `json:"boolval"` + KWVal string `json:"kwval"` + BinaryVal string `json:"binaryval"` + DateVal string `json:"dateval"` +} + +func NewRandomSample() testT { + + return testT{ + IntVal: int(rand.Int31()), + ObjVal: subT{SubString: randomdata.SillyName()}, + BoolVal: (rand.Intn(1) == 1), + KWVal: randomdata.SillyName(), + BinaryVal: base64.StdEncoding.EncodeToString([]byte(randomdata.SillyName())), + DateVal: time.Now().Format(time.RFC3339), + } +} + +func (ts testT) marshal(t testing.TB) []byte { + data, err := json.Marshal(&ts) + if err != nil { + t.Fatal(err) + } + return data +} + +func (ts *testT) read(t testing.TB, bulker Bulk, ctx context.Context, index, id string) { + data, err := bulker.Read(ctx, index, id) + if err != nil { + t.Fatal(err) + } + + err = json.Unmarshal(data, ts) + if err != nil { + t.Fatal(err) + } +} + +func init() { + c, err := yaml.NewConfig(defaultCfgData, config.DefaultOptions...) + if err != nil { + panic(err) + } + err = c.Unpack(&defaultCfg, config.DefaultOptions...) + if err != nil { + panic(err) + } +} + +func SetupBulk(ctx context.Context, t testing.TB, opts ...BulkOpt) Bulk { + t.Helper() + _, bulker, err := InitES(ctx, &defaultCfg, opts...) + if err != nil { + t.Fatal(err) + } + return bulker +} + +func SetupIndex(ctx context.Context, t testing.TB, bulker Bulk, mapping string) string { + t.Helper() + index := xid.New().String() + err := esutil.EnsureIndex(ctx, bulker.Client(), index, mapping) + if err != nil { + t.Fatal(err) + } + return index +} + +func SetupIndexWithBulk(ctx context.Context, t testing.TB, mapping string, opts ...BulkOpt) (string, Bulk) { + t.Helper() + bulker := SetupBulk(ctx, t, opts...) 
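+	// Create a uniquely named test index with the supplied mapping on the freshly initialized bulker.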
+ index := SetupIndex(ctx, t, bulker, mapping) + return index, bulker +} + +func QuietLogger() func() { + l := zerolog.GlobalLevel() + + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + return func() { + zerolog.SetGlobalLevel(l) + } +} + +func EqualElastic(werr, gerr error) bool { + if werr == gerr { + return true + } + + wantErr, ok1 := werr.(es.ErrElastic) + gotErr, ok2 := gerr.(*es.ErrElastic) + + if !ok2 { + if tryAgain, ok3 := gerr.(es.ErrElastic); ok3 { + gotErr = &tryAgain + ok2 = true + } + } + + return (ok1 && ok2 && wantErr.Status == gotErr.Status && wantErr.Type == gotErr.Type) +} diff --git a/internal/pkg/checkin/bulk.go b/internal/pkg/checkin/bulk.go new file mode 100644 index 000000000..8f20d0294 --- /dev/null +++ b/internal/pkg/checkin/bulk.go @@ -0,0 +1,233 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package checkin + +import ( + "context" + "encoding/json" + "sync" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + + "github.com/rs/zerolog/log" +) + +const defaultFlushInterval = 10 * time.Second + +type optionsT struct { + flushInterval time.Duration +} + +type Opt func(*optionsT) + +func WithFlushInterval(d time.Duration) Opt { + return func(opt *optionsT) { + opt.flushInterval = d + } +} + +type extraT struct { + meta []byte + seqNo sqn.SeqNo +} + +// Minimize the size of this structure. +// There will be 10's of thousands of items +// in the map at any point. +type pendingT struct { + ts string + extra *extraT +} + +type Bulk struct { + opts optionsT + bulker bulk.Bulk + mut sync.Mutex + pending map[string]pendingT + + ts string + unix int64 +} + +func NewBulk(bulker bulk.Bulk, opts ...Opt) *Bulk { + parsedOpts := parseOpts(opts...) + + return &Bulk{ + opts: parsedOpts, + bulker: bulker, + pending: make(map[string]pendingT), + } +} + +func parseOpts(opts ...Opt) optionsT { + + outOpts := optionsT{ + flushInterval: defaultFlushInterval, + } + + for _, f := range opts { + f(&outOpts) + } + + return outOpts +} + +// Generate and cache timestamp on seconds change. +// Avoid thousands of formats of an identical string. +func (bc *Bulk) timestamp() string { + + // WARNING: Expects mutex locked. + now := time.Now() + if now.Unix() != bc.unix { + bc.unix = now.Unix() + bc.ts = now.UTC().Format(time.RFC3339) + } + + return bc.ts +} + +// WARNING: Bulk will take ownership of fields, +// so do not use after passing in. +func (bc *Bulk) CheckIn(id string, meta []byte, seqno sqn.SeqNo) error { + + // Separate out the extra data to minimize + // the memory footprint of the 90% case of just + // updating the timestamp. 
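+	// Allocate the extra record only when metadata or a sequence number accompanies the check-in; in the common case extra stays nil and only the timestamp is refreshed.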
+ var extra *extraT + if meta != nil || seqno.IsSet() { + extra = &extraT{ + meta: meta, + seqNo: seqno, + } + } + + bc.mut.Lock() + + bc.pending[id] = pendingT{ + ts: bc.timestamp(), + extra: extra, + } + + bc.mut.Unlock() + return nil +} + +func (bc *Bulk) Run(ctx context.Context) error { + + tick := time.NewTicker(bc.opts.flushInterval) + defer tick.Stop() + + var err error +LOOP: + for { + select { + case <-tick.C: + if err = bc.flush(ctx); err != nil { + log.Error().Err(err).Msg("Eat bulk checkin error; Keep on truckin'") + err = nil + } + + case <-ctx.Done(): + err = ctx.Err() + break LOOP + } + } + + return err +} + +func (bc *Bulk) flush(ctx context.Context) error { + start := time.Now() + + bc.mut.Lock() + pending := bc.pending + bc.pending = make(map[string]pendingT, len(pending)) + bc.mut.Unlock() + + if len(pending) == 0 { + return nil + } + + updates := make([]bulk.MultiOp, 0, len(pending)) + + simpleCache := make(map[string][]byte) + + nowTimestamp := start.UTC().Format(time.RFC3339) + + var err error + var needRefresh bool + for id, pendingData := range pending { + + // In the simple case, there are no fields and no seqNo. + // When that is true, we can reuse an already generated + // JSON body containing just the timestamp updates. + var body []byte + if pendingData.extra == nil { + var ok bool + body, ok = simpleCache[pendingData.ts] + if !ok { + fields := bulk.UpdateFields{ + dl.FieldLastCheckin: pendingData.ts, + dl.FieldUpdatedAt: nowTimestamp, + } + if body, err = fields.Marshal(); err != nil { + return err + } + simpleCache[pendingData.ts] = body + } + } else { + + fields := bulk.UpdateFields{ + dl.FieldLastCheckin: pendingData.ts, // Set the checkin timestamp + dl.FieldUpdatedAt: nowTimestamp, // Set "updated_at" to the current timestamp + } + + // Update local metadata if provided + if pendingData.extra.meta != nil { + // Surprise: The json encodeer compacts this raw JSON during + // the encode process, so there my be unexpected memory overhead: + // https://github.com/golang/go/blob/go1.16.3/src/encoding/json/encode.go#L499 + fields[dl.FieldLocalMetadata] = json.RawMessage(pendingData.extra.meta) + } + + // If seqNo changed, set the field appropriately + if pendingData.extra.seqNo.IsSet() { + fields[dl.FieldActionSeqNo] = pendingData.extra.seqNo + + // Only refresh if seqNo changed; dropping metadata not important. + needRefresh = true + } + + if body, err = fields.Marshal(); err != nil { + return err + } + } + + updates = append(updates, bulk.MultiOp{ + Id: id, + Body: body, + Index: dl.FleetAgents, + }) + } + + var opts []bulk.Opt + if needRefresh { + opts = append(opts, bulk.WithRefresh()) + } + + _, err = bc.bulker.MUpdate(ctx, updates, opts...) + + log.Trace(). + Err(err). + Dur("rtt", time.Since(start)). + Int("cnt", len(updates)). + Bool("refresh", needRefresh). + Msg("Flush updates") + + return err +} diff --git a/internal/pkg/checkin/bulk_test.go b/internal/pkg/checkin/bulk_test.go new file mode 100644 index 000000000..fc881e16a --- /dev/null +++ b/internal/pkg/checkin/bulk_test.go @@ -0,0 +1,209 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
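+//
+// Unit tests and benchmarks for the check-in accumulator; the tests use a mock bulker that captures the MUpdate operations produced by flush.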
+ +package checkin + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/google/go-cmp/cmp" + + tst "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/rs/xid" + "github.com/rs/zerolog" +) + +type CustomBulk struct { + tst.MockBulk + + ops []bulk.MultiOp +} + +func (m *CustomBulk) MUpdate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + m.ops = append(m.ops, ops...) + return nil, nil +} + +// Test simple, +// Test with fields +// Test with seq no + +func TestBulkSimple(t *testing.T) { + start := time.Now() + + var mockBulk CustomBulk + + bc := NewBulk(&mockBulk) + + cases := []struct { + desc string + id string + meta []byte + seqno sqn.SeqNo + }{ + { + "Simple case", + "simpleId", + nil, + nil, + }, + { + "Singled field case", + "singleFieldId", + []byte(`{"hey":"now"}`), + nil, + }, + { + "Multi field case", + "multiFieldId", + []byte(`{"hey":"now","brown":"cow"}`), + nil, + }, + { + "Multi field nested case", + "multiFieldNestedId", + []byte(`{"hey":"now","wee":{"little":"doggie"}}`), + nil, + }, + { + "Simple case with seqNo", + "simpleseqno", + nil, + sqn.SeqNo{1, 2, 3, 4}, + }, + { + "Field case with seqNo", + "simpleseqno", + []byte(`{"uncle":"fester"}`), + sqn.SeqNo{5, 6, 7, 8}, + }, + } + + for _, c := range cases { + t.Run(c.desc, func(t *testing.T) { + + if err := bc.CheckIn(c.id, c.meta, c.seqno); err != nil { + t.Fatal(err) + } + + if err := bc.flush(context.Background()); err != nil { + t.Fatal(err) + } + + if len(mockBulk.ops) != 1 { + t.Fatal("Expected one op") + } + + op := mockBulk.ops[0] + + mockBulk.ops = nil + + // deserialize the response + if op.Id != c.id { + t.Error("Wrong id") + } + + if op.Index != dl.FleetAgents { + t.Error("Wrong index") + } + + type updateT struct { + LastCheckin string `json:"last_checkin"` + UpdatedAt string `json:"updated_at"` + Meta json.RawMessage `json:"local_metadata"` + SeqNo sqn.SeqNo `json:"action_seq_no"` + } + + m := make(map[string]updateT) + if err := json.Unmarshal(op.Body, &m); err != nil { + t.Error(err) + } + + sub, ok := m["doc"] + if !ok { + t.Fatal("expected doc") + } + + validateTimestamp(t, start.Truncate(time.Second), sub.LastCheckin) + validateTimestamp(t, start.Truncate(time.Second), sub.UpdatedAt) + + if c.seqno != nil { + if cdiff := cmp.Diff(c.seqno, sub.SeqNo); cdiff != "" { + t.Error(cdiff) + } + } + + if c.meta != nil && bytes.Compare(c.meta, sub.Meta) != 0 { + t.Error("meta doesn't match up") + } + + }) + } +} + +func validateTimestamp(t *testing.T, start time.Time, ts string) { + + if t1, err := time.Parse(time.RFC3339, ts); err != nil { + t.Error("expected rfc3999") + } else if start.After(t1) { + t.Error("timestamp in the past") + } +} + +func benchmarkBulk(n int, flush bool, b *testing.B) { + b.ReportAllocs() + + l := zerolog.GlobalLevel() + defer zerolog.SetGlobalLevel(l) + + zerolog.SetGlobalLevel(zerolog.ErrorLevel) + + var mockBulk tst.MockBulk + + bc := NewBulk(mockBulk) + + ids := make([]string, 0, n) + for i := 0; i < n; i++ { + id := xid.New().String() + ids = append(ids, id) + } + + for i := 0; i < b.N; i++ { + + for _, id := range ids { + err := bc.CheckIn(id, nil, nil) + if err != nil { + b.Fatal(err) + } + } + + if flush { + err := bc.flush(context.Background()) + if err != nil { + b.Fatal(err) + } + } 
+ } +} + +func BenchmarkBulk_1(b *testing.B) { benchmarkBulk(1, false, b) } +func BenchmarkBulk_64(b *testing.B) { benchmarkBulk(64, false, b) } +func BenchmarkBulk_8192(b *testing.B) { benchmarkBulk(8192, false, b) } +func BenchmarkBulk_37268(b *testing.B) { benchmarkBulk(37268, false, b) } +func BenchmarkBulk_131072(b *testing.B) { benchmarkBulk(131072, false, b) } +func BenchmarkBulk_262144(b *testing.B) { benchmarkBulk(262144, false, b) } + +func BenchmarkBulkFlush_1(b *testing.B) { benchmarkBulk(1, true, b) } +func BenchmarkBulkFlush_64(b *testing.B) { benchmarkBulk(64, true, b) } +func BenchmarkBulkFlush_8192(b *testing.B) { benchmarkBulk(8192, true, b) } +func BenchmarkBulkFlush_37268(b *testing.B) { benchmarkBulk(37268, true, b) } +func BenchmarkBulkFlush_131072(b *testing.B) { benchmarkBulk(131072, true, b) } +func BenchmarkBulkFlush_262144(b *testing.B) { benchmarkBulk(262144, true, b) } diff --git a/internal/pkg/danger/buf.go b/internal/pkg/danger/buf.go new file mode 100644 index 000000000..36daaa6f1 --- /dev/null +++ b/internal/pkg/danger/buf.go @@ -0,0 +1,83 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package danger + +// Effectively golang's string builder with a Reset option + +import ( + "unicode/utf8" +) + +type Buf struct { + buf []byte +} + +func (b *Buf) Bytes() []byte { + return b.buf +} + +func (b *Buf) Set(s []byte) { + b.buf = s +} + +func (b *Buf) Len() int { return len(b.buf) } + +func (b *Buf) Cap() int { return cap(b.buf) } + +func (b *Buf) Reset() { + b.buf = b.buf[:0] +} + +func (b *Buf) grow(n int) { + buf := make([]byte, len(b.buf), 2*cap(b.buf)+n) + copy(buf, b.buf) + b.buf = buf +} + +func (b *Buf) Grow(n int) { + if n < 0 { + panic("danger.Buf.Grow: negative count") + } + if cap(b.buf)-len(b.buf) < n { + b.grow(n) + } +} + +// Write appends the contents of p to b's buffer. +// Write always returns len(p), nil. +func (b *Buf) Write(p []byte) (int, error) { + b.buf = append(b.buf, p...) + return len(p), nil +} + +// WriteByte appends the byte c to b's buffer. +// The returned error is always nil. +func (b *Buf) WriteByte(c byte) error { + b.buf = append(b.buf, c) + return nil +} + +// WriteRune appends the UTF-8 encoding of Unicode code point r to b's buffer. +// It returns the length of r and a nil error. +func (b *Buf) WriteRune(r rune) (int, error) { + if r < utf8.RuneSelf { + b.buf = append(b.buf, byte(r)) + return 1, nil + } + l := len(b.buf) + if cap(b.buf)-l < utf8.UTFMax { + b.grow(utf8.UTFMax) + } + n := utf8.EncodeRune(b.buf[l:l+utf8.UTFMax], r) + b.buf = b.buf[:l+n] + return n, nil +} + +// WriteString appends the contents of s to b's buffer. +// It returns the length of s and a nil error. +func (b *Buf) WriteString(s string) (int, error) { + b.buf = append(b.buf, s...) + return len(s), nil +} diff --git a/internal/pkg/danger/buf_test.go b/internal/pkg/danger/buf_test.go new file mode 100644 index 000000000..d0618aea1 --- /dev/null +++ b/internal/pkg/danger/buf_test.go @@ -0,0 +1,43 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package danger + +import ( + "crypto/rand" + "testing" +) + +// Validate that if a buffer needs to grow during a write, +// Previous cached pointers into underlying data are still valid. +func TestBufGrowWhileWrite(t *testing.T) { + + nBytes := 1024 * 1024 + src := make([]byte, 1024*1024) + _, err := rand.Read(src) + if err != nil { + t.Fatal(err) + } + + ptrs := make([][]byte, 0, nBytes) + + var dst Buf + for i := 0; i < nBytes; i++ { + + if err = dst.WriteByte(src[i]); err != nil { + t.Fatal(err) + } + + ptr := dst.Bytes()[i:] + + ptrs = append(ptrs, ptr) + } + + for i, p := range ptrs { + + if p[0] != src[i] { + t.Fatal("Mismatch: ", i) + } + } +} diff --git a/internal/pkg/dl/action_results_integration_test.go b/internal/pkg/dl/action_results_integration_test.go index 417c38e84..c2f0ccf48 100644 --- a/internal/pkg/dl/action_results_integration_test.go +++ b/internal/pkg/dl/action_results_integration_test.go @@ -104,7 +104,7 @@ func TestActionResultsStored(t *testing.T) { index, bulker, acrs := setupActionResults(ctx, t) - res, err := bulker.Search(ctx, []string{index}, []byte("{}")) + res, err := bulker.Search(ctx, index, []byte("{}")) if err != nil { t.Fatal(err) } diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index f2ad891f5..7d0d6df99 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -31,6 +31,8 @@ const ( FieldPolicyId = "policy_id" FieldRevisionIdx = "revision_idx" FieldCoordinatorIdx = "coordinator_idx" + FieldLastCheckin = "last_checkin" + FieldLocalMetadata = "local_metadata" FieldPolicyRevisionIdx = "policy_revision_idx" FieldPolicyCoordinatorIdx = "policy_coordinator_idx" FieldDefaultApiKey = "default_api_key" diff --git a/internal/pkg/dl/policies.go b/internal/pkg/dl/policies.go index f12199619..9a65eddae 100644 --- a/internal/pkg/dl/policies.go +++ b/internal/pkg/dl/policies.go @@ -36,7 +36,7 @@ func prepareQueryLatestPolicies() []byte { // QueryLatestPolices gets the latest revision for a policy func QueryLatestPolicies(ctx context.Context, bulker bulk.Bulk, opt ...Option) ([]model.Policy, error) { o := newOption(FleetPolicies, opt...) 
- res, err := bulker.Search(ctx, []string{o.indexName}, tmplQueryLatestPolicies) + res, err := bulker.Search(ctx, o.indexName, tmplQueryLatestPolicies) if err != nil { return nil, err } diff --git a/internal/pkg/dl/policies_leader.go b/internal/pkg/dl/policies_leader.go index 53bb6f8d6..0d23a3233 100644 --- a/internal/pkg/dl/policies_leader.go +++ b/internal/pkg/dl/policies_leader.go @@ -49,7 +49,7 @@ func SearchPolicyLeaders(ctx context.Context, bulker bulk.Bulk, ids []string, op if err != nil { return } - res, err := bulker.Search(ctx, []string{o.indexName}, data) + res, err := bulker.Search(ctx, o.indexName, data) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { log.Debug().Str("index", o.indexName).Msg(es.ErrIndexNotFound.Error()) diff --git a/internal/pkg/dl/search.go b/internal/pkg/dl/search.go index 216c3e96f..0672b145a 100644 --- a/internal/pkg/dl/search.go +++ b/internal/pkg/dl/search.go @@ -18,7 +18,7 @@ func Search(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, return nil, err } - res, err := bulker.Search(ctx, []string{index}, query) + res, err := bulker.Search(ctx, index, query) if err != nil { return nil, err } @@ -31,7 +31,7 @@ func SearchWithOneParam(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, i if err != nil { return nil, err } - res, err := bulker.Search(ctx, []string{index}, query) + res, err := bulker.Search(ctx, index, query) if err != nil { return nil, err } diff --git a/internal/pkg/es/delete.go b/internal/pkg/es/delete.go index 720bc9f7d..a27cffaa4 100644 --- a/internal/pkg/es/delete.go +++ b/internal/pkg/es/delete.go @@ -26,7 +26,7 @@ func DeleteIndices(ctx context.Context, es *elasticsearch.Client, indices []stri return err } if !ares.Acknowledged { - err = TranslateError(res.StatusCode, ares.Error) + err = TranslateError(res.StatusCode, &ares.Error) } return err diff --git a/internal/pkg/es/error.go b/internal/pkg/es/error.go index 008097546..7e39c5ce3 100644 --- a/internal/pkg/es/error.go +++ b/internal/pkg/es/error.go @@ -9,6 +9,8 @@ import ( "fmt" ) +// TODO: Why do we have both ErrElastic and ErrorT? Very strange. 
+ type ErrElastic struct { Status int Type string @@ -42,10 +44,15 @@ var ( ErrNotFound = errors.New("not found") ) -func TranslateError(status int, e ErrorT) error { +func TranslateError(status int, e *ErrorT) error { if status == 200 || status == 201 { return nil } + if e == nil { + return &ErrElastic{ + Status: status, + } + } var err error switch e.Type { diff --git a/internal/pkg/es/info.go b/internal/pkg/es/info.go index 46fe4df21..d5bf00239 100644 --- a/internal/pkg/es/info.go +++ b/internal/pkg/es/info.go @@ -39,7 +39,7 @@ func FetchESVersion(ctx context.Context, esCli *elasticsearch.Client) (version s } // Check error - err = TranslateError(res.StatusCode, sres.Error) + err = TranslateError(res.StatusCode, &sres.Error) if err != nil { return } diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go index 9d08024aa..ef93be0a7 100644 --- a/internal/pkg/monitor/global_checkpoint.go +++ b/internal/pkg/monitor/global_checkpoint.go @@ -80,7 +80,7 @@ func processGlobalCheckpointResponse(res *esapi.Response) (seqno sqn.SeqNo, err } // Check error - err = esh.TranslateError(res.StatusCode, sres.Error) + err = esh.TranslateError(res.StatusCode, &sres.Error) if err != nil { return nil, err } diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 8bd4ee27c..1485ef441 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -325,7 +325,7 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ } if res.IsError() { - err = es.TranslateError(res.StatusCode, esres.Error) + err = es.TranslateError(res.StatusCode, &esres.Error) } if err != nil { diff --git a/internal/pkg/testing/bulk.go b/internal/pkg/testing/bulk.go index 1b8fb12fe..afb5ab50c 100644 --- a/internal/pkg/testing/bulk.go +++ b/internal/pkg/testing/bulk.go @@ -42,11 +42,27 @@ func (m MockBulk) Read(ctx context.Context, index, id string, opts ...bulk.Opt) return nil, nil } -func (m MockBulk) MUpdate(ctx context.Context, ops []bulk.BulkOp, opts ...bulk.Opt) error { +func (m MockBulk) Delete(ctx context.Context, index, id string, opts ...bulk.Opt) error { return nil } -func (m MockBulk) Search(ctx context.Context, index []string, body []byte, opts ...bulk.Opt) (*es.ResultT, error) { +func (m MockBulk) MCreate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MIndex(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MUpdate(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) MDelete(ctx context.Context, ops []bulk.MultiOp, opts ...bulk.Opt) ([]bulk.BulkIndexerResponseItem, error) { + return nil, nil +} + +func (m MockBulk) Search(ctx context.Context, index string, body []byte, opts ...bulk.Opt) (*es.ResultT, error) { return &es.ResultT{}, nil } diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go index 10e8f85b2..a75962055 100644 --- a/internal/pkg/ver/check.go +++ b/internal/pkg/ver/check.go @@ -50,7 +50,11 @@ func checkCompatibility(fleetVersion, esVersion string) error { } if !verConst.Check(ver) { - log.Error().Err(ErrUnsupportedVersion).Msg("failed elasticsearch version check") + log.Error(). + Err(ErrUnsupportedVersion). + Str("constraint", verConst.String()). + Str("reported", ver.String()). 
+ Msg("failed elasticsearch version check") return ErrUnsupportedVersion } log.Info().Str("fleet_version", fleetVersion).Str("elasticsearch_version", esVersion).Msg("versions are compatible") From 4ee3c3f7e3a573e9d02154c6a0032b4f02853a48 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 14 May 2021 12:14:50 +0000 Subject: [PATCH 087/240] Fix the comparison between a MaxUint32 and the len of the slice (#352) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit received.. Ok, this seems to be a problem with the int vs uint check that we do. I am not sure why this was raised now / today. I event check older 1.15 version of golang and I had the same issue. Before the fix ``` ❯ make release-manager-snapshot [22:46:57] ./dev-tools/run_with_go_ver /Applications/Xcode.app/Contents/Developer/usr/bin/make release v0.1.0 GOOS=darwin GOARCH=amd64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-darwin-x86_64/fleet-server . GOOS=darwin GOARCH=arm64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-darwin-arm64/fleet-server . GOOS=linux GOARCH=386 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -o build/binaries/fleet-server-8.0.0-SNAPSHOT-linux-x86/fleet-server . internal/pkg/bulk/opMulti.go:35:14: constant 4294967295 overflows int make[2]: *** [release-linux/386] Error 2 make[1]: *** [release-manager-release] Error 2 make: *** [release-manager-snapshot] Error 2 ``` After: ``` ❯ make release-manager-snapshot [22:49:11] ./dev-tools/run_with_go_ver /Applications/Xcode.app/Contents/Developer/usr/bin/make release v0.1.0 GOOS=darwin GOARCH=amd64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-darwin-x86_64/fleet-server . GOOS=darwin GOARCH=arm64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-darwin-arm64/fleet-server . GOOS=linux GOARCH=386 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -o build/binaries/fleet-server-8.0.0-SNAPSHOT-linux-x86/fleet-server . GOOS=linux GOARCH=amd64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-linux-x86_64/fleet-server . GOOS=linux GOARCH=arm64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-linux-arm64/fleet-server . GOOS=windows GOARCH=386 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-windows-x86/fleet-server . GOOS=windows GOARCH=amd64 go build -ldflags="-w -s -X main.Version=8.0.0-SNAPSHOT" -buildmode=pie -o build/binaries/fleet-server-8.0.0-SNAPSHOT-windows-x86_64/fleet-server . 
>>> elapsed time 31s ``` (cherry picked from commit db80d61b434173bbe121d8d548d31ab8e3063189) Co-authored-by: Pier-Hugues Pellerin --- internal/pkg/bulk/opMulti.go | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/internal/pkg/bulk/opMulti.go b/internal/pkg/bulk/opMulti.go index 3017c85f8..b4fe72e05 100644 --- a/internal/pkg/bulk/opMulti.go +++ b/internal/pkg/bulk/opMulti.go @@ -7,6 +7,7 @@ package bulk import ( "context" "errors" + "math" ) func (b *Bulker) MCreate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) { @@ -30,9 +31,7 @@ func (b *Bulker) multiWaitBulkOp(ctx context.Context, action actionT, ops []Mult return nil, nil } - const kMaxBulk = (1 << 32) - 1 - - if len(ops) > kMaxBulk { + if uint(len(ops)) > math.MaxUint32 { return nil, errors.New("too many bulk ops") } From f97db96470cb0466f9bd101c47b22c036602aa7d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 17 May 2021 01:15:56 -0400 Subject: [PATCH 088/240] [Automation] Update elastic stack version to 7.14.0-c3a960d1 for testing (#355) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 32f67c857..6eeddba19 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-e8048b9e-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-c3a960d1-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 78cdb8e9dcf322a731e5cf2baabdd0c4f6fe0f01 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 10:06:21 -0400 Subject: [PATCH 089/240] Remove the remnants of fleet indices bootstrapping for the integration tests (#360) (#362) (cherry picked from commit 970b4237f78f58fcdc7121ce8e0ce7d292a9b683) Co-authored-by: Aleksandr Maus --- Makefile | 1 - dev-tools/integration/main.go | 2483 ---------------------- internal/pkg/testing/esutil/bootstrap.go | 52 +- 3 files changed, 1 insertion(+), 2535 deletions(-) delete mode 100644 dev-tools/integration/main.go diff --git a/Makefile b/Makefile index 3601e568e..f7be55144 100644 --- a/Makefile +++ b/Makefile @@ -184,5 +184,4 @@ test-int: prepare-test-context ## - Run integration tests with full setup (slow .PHONY: test-int-set test-int-set: ## - Run integration tests without setup # Initialize indices one before running all the tests - ELASTICSEARCH_HOSTS=${TEST_ELASTICSEARCH_HOSTS} go run ./dev-tools/integration/main.go ELASTICSEARCH_HOSTS=${TEST_ELASTICSEARCH_HOSTS} go test -v -tags=integration -count=1 -race ./... diff --git a/dev-tools/integration/main.go b/dev-tools/integration/main.go deleted file mode 100644 index e43ed35d1..000000000 --- a/dev-tools/integration/main.go +++ /dev/null @@ -1,2483 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. 
- -package main - -import ( - "context" - "errors" - "fmt" - - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" - - "github.com/rs/zerolog/log" -) - -func checkErr(err error) { - if err != nil { - panic(err) - } -} - -// Setup for integration testing -// Create the indices and data streams -func main() { - fmt.Println("Setting up the indices") - - cfg, err := config.LoadFile("fleet-server.yml") - checkErr(err) - - ctx := context.Background() - es, err := es.NewClient(ctx, cfg, false) - checkErr(err) - - err = esutil.EnsureESIndices(ctx, es) - checkErr(err) - - // Create .kibana index for integration tests - // This temporarily until all the parts are unplugged from .kibana - // Otherwise the fleet server fails to start at the moment - const name = ".kibana" - err = esutil.EnsureIndex(ctx, es, name, kibanaMapping) - if errors.Is(err, esutil.ErrResourceAlreadyExists) { - log.Info().Str("name", name).Msg("Index already exists") - err = nil - } - checkErr(err) -} - -const kibanaMapping = `{ - "dynamic" : "strict", - "properties" : { - "action" : { - "properties" : { - "actionTypeId" : { - "type" : "keyword" - }, - "config" : { - "type" : "object", - "enabled" : false - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "secrets" : { - "type" : "binary" - } - } - }, - "action_task_params" : { - "properties" : { - "actionId" : { - "type" : "keyword" - }, - "apiKey" : { - "type" : "binary" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alert" : { - "properties" : { - "actions" : { - "type" : "nested", - "properties" : { - "actionRef" : { - "type" : "keyword" - }, - "actionTypeId" : { - "type" : "keyword" - }, - "group" : { - "type" : "keyword" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alertTypeId" : { - "type" : "keyword" - }, - "apiKey" : { - "type" : "binary" - }, - "apiKeyOwner" : { - "type" : "keyword" - }, - "consumer" : { - "type" : "keyword" - }, - "createdAt" : { - "type" : "date" - }, - "createdBy" : { - "type" : "keyword" - }, - "enabled" : { - "type" : "boolean" - }, - "executionStatus" : { - "properties" : { - "error" : { - "properties" : { - "message" : { - "type" : "keyword" - }, - "reason" : { - "type" : "keyword" - } - } - }, - "lastExecutionDate" : { - "type" : "date" - }, - "status" : { - "type" : "keyword" - } - } - }, - "meta" : { - "properties" : { - "versionApiKeyLastmodified" : { - "type" : "keyword" - } - } - }, - "muteAll" : { - "type" : "boolean" - }, - "mutedInstanceIds" : { - "type" : "keyword" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "params" : { - "type" : "object", - "enabled" : false - }, - "schedule" : { - "properties" : { - "interval" : { - "type" : "keyword" - } - } - }, - "scheduledTaskId" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "throttle" : { - "type" : "keyword" - }, - "updatedAt" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "keyword" - } - } - }, - "api_key_pending_invalidation" : { - "properties" : { - "apiKeyId" : { - "type" : "keyword" - }, - "createdAt" : { - "type" : "date" - } - } - }, - "apm-indices" : { - "properties" : { - "apm_oss" : { - "properties" : { - "errorIndices" : { - "type" : "keyword" - }, - "metricsIndices" : { - "type" : "keyword" - }, - "onboardingIndices" : { - "type" : 
"keyword" - }, - "sourcemapIndices" : { - "type" : "keyword" - }, - "spanIndices" : { - "type" : "keyword" - }, - "transactionIndices" : { - "type" : "keyword" - } - } - } - } - }, - "apm-telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "app_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "application_usage_daily" : { - "dynamic" : "false", - "properties" : { - "timestamp" : { - "type" : "date" - } - } - }, - "application_usage_totals" : { - "type" : "object", - "dynamic" : "false" - }, - "application_usage_transactional" : { - "type" : "object", - "dynamic" : "false" - }, - "background-session" : { - "properties" : { - "created" : { - "type" : "date" - }, - "expires" : { - "type" : "date" - }, - "idMapping" : { - "type" : "object", - "enabled" : false - }, - "initialState" : { - "type" : "object", - "enabled" : false - }, - "name" : { - "type" : "keyword" - }, - "restoreState" : { - "type" : "object", - "enabled" : false - }, - "status" : { - "type" : "keyword" - } - } - }, - "book" : { - "properties" : { - "author" : { - "type" : "keyword" - }, - "readIt" : { - "type" : "boolean" - }, - "title" : { - "type" : "keyword" - } - } - }, - "canvas-element" : { - "dynamic" : "false", - "properties" : { - "@created" : { - "type" : "date" - }, - "@timestamp" : { - "type" : "date" - }, - "content" : { - "type" : "text" - }, - "help" : { - "type" : "text" - }, - "image" : { - "type" : "text" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "canvas-workpad" : { - "dynamic" : "false", - "properties" : { - "@created" : { - "type" : "date" - }, - "@timestamp" : { - "type" : "date" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - } - } - }, - "canvas-workpad-template" : { - "dynamic" : "false", - "properties" : { - "help" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "tags" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "template_key" : { - "type" : "keyword" - } - } - }, - "cases" : { - "properties" : { - "closed_at" : { - "type" : "date" - }, - "closed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "connector" : { - "properties" : { - "fields" : { - "properties" : { - "key" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "date" - }, - "created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "description" : { - "type" : "text" - }, - "external_service" : { - "properties" : { - "connector_id" : { - "type" : "keyword" - }, - "connector_name" : { - "type" : "keyword" - }, - "external_id" : { - "type" : "keyword" - }, - "external_title" : { - "type" : "text" - }, - "external_url" : { - "type" : "text" - }, - "pushed_at" : { - "type" : "date" - }, - "pushed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "status" : { - "type" : "keyword" - }, - "tags" : { 
- "type" : "keyword" - }, - "title" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-comments" : { - "properties" : { - "alertId" : { - "type" : "keyword" - }, - "comment" : { - "type" : "text" - }, - "created_at" : { - "type" : "date" - }, - "created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "index" : { - "type" : "keyword" - }, - "pushed_at" : { - "type" : "date" - }, - "pushed_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "type" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-configure" : { - "properties" : { - "closure_type" : { - "type" : "keyword" - }, - "connector" : { - "properties" : { - "fields" : { - "properties" : { - "key" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "date" - }, - "created_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - } - } - }, - "cases-user-actions" : { - "properties" : { - "action" : { - "type" : "keyword" - }, - "action_at" : { - "type" : "date" - }, - "action_by" : { - "properties" : { - "email" : { - "type" : "keyword" - }, - "full_name" : { - "type" : "keyword" - }, - "username" : { - "type" : "keyword" - } - } - }, - "action_field" : { - "type" : "keyword" - }, - "new_value" : { - "type" : "text" - }, - "old_value" : { - "type" : "text" - } - } - }, - "config" : { - "dynamic" : "false", - "properties" : { - "buildNum" : { - "type" : "keyword" - } - } - }, - "dashboard" : { - "properties" : { - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text", - "index" : false - } - } - }, - "optionsJSON" : { - "type" : "text", - "index" : false - }, - "panelsJSON" : { - "type" : "text", - "index" : false - }, - "refreshInterval" : { - "properties" : { - "display" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "pause" : { - "type" : "boolean", - "doc_values" : false, - "index" : false - }, - "section" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "value" : { - "type" : "integer", - "index" : false, - "doc_values" : false - } - } - }, - "timeFrom" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "timeRestore" : { - "type" : "boolean", - "doc_values" : false, - "index" : false - }, - "timeTo" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { 
- "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "endpoint:user-artifact" : { - "properties" : { - "body" : { - "type" : "binary" - }, - "compressionAlgorithm" : { - "type" : "keyword", - "index" : false - }, - "created" : { - "type" : "date", - "index" : false - }, - "decodedSha256" : { - "type" : "keyword", - "index" : false - }, - "decodedSize" : { - "type" : "long", - "index" : false - }, - "encodedSha256" : { - "type" : "keyword" - }, - "encodedSize" : { - "type" : "long", - "index" : false - }, - "encryptionAlgorithm" : { - "type" : "keyword", - "index" : false - }, - "identifier" : { - "type" : "keyword" - } - } - }, - "endpoint:user-artifact-manifest" : { - "properties" : { - "created" : { - "type" : "date", - "index" : false - }, - "ids" : { - "type" : "keyword", - "index" : false - }, - "schemaVersion" : { - "type" : "keyword" - }, - "semanticVersion" : { - "type" : "keyword", - "index" : false - } - } - }, - "enterprise_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "epm-packages" : { - "properties" : { - "es_index_patterns" : { - "type" : "object", - "enabled" : false - }, - "install_source" : { - "type" : "keyword" - }, - "install_started_at" : { - "type" : "date" - }, - "install_status" : { - "type" : "keyword" - }, - "install_version" : { - "type" : "keyword" - }, - "installed_es" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "installed_kibana" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "internal" : { - "type" : "boolean" - }, - "name" : { - "type" : "keyword" - }, - "removable" : { - "type" : "boolean" - }, - "version" : { - "type" : "keyword" - } - } - }, - "exception-list" : { - "properties" : { - "_tags" : { - "type" : "keyword" - }, - "comments" : { - "properties" : { - "comment" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "keyword" - }, - "entries" : { - "properties" : { - "entries" : { - "properties" : { - "field" : { - "type" : "keyword" - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "field" : { - "type" : "keyword" - }, - "list" : { - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "immutable" : { - "type" : "boolean" - }, - "item_id" : { - "type" : "keyword" - }, - "list_id" : { - "type" : "keyword" - }, - "list_type" : { - "type" : "keyword" - }, - "meta" : { - "type" : "keyword" - }, - "name" : { - "type" : "keyword" - }, - "os_types" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "tie_breaker_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "exception-list-agnostic" : { - "properties" : { - "_tags" 
: { - "type" : "keyword" - }, - "comments" : { - "properties" : { - "comment" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "created_at" : { - "type" : "keyword" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "keyword" - }, - "entries" : { - "properties" : { - "entries" : { - "properties" : { - "field" : { - "type" : "keyword" - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "field" : { - "type" : "keyword" - }, - "list" : { - "properties" : { - "id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "operator" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "keyword", - "fields" : { - "text" : { - "type" : "text" - } - } - } - } - }, - "immutable" : { - "type" : "boolean" - }, - "item_id" : { - "type" : "keyword" - }, - "list_id" : { - "type" : "keyword" - }, - "list_type" : { - "type" : "keyword" - }, - "meta" : { - "type" : "keyword" - }, - "name" : { - "type" : "keyword" - }, - "os_types" : { - "type" : "keyword" - }, - "tags" : { - "type" : "keyword" - }, - "tie_breaker_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_by" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "file-upload-telemetry" : { - "properties" : { - "filesUploadedTotalCount" : { - "type" : "long" - } - } - }, - "fleet-agent-actions" : { - "properties" : { - "ack_data" : { - "type" : "text" - }, - "agent_id" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "date" - }, - "data" : { - "type" : "binary" - }, - "policy_id" : { - "type" : "keyword" - }, - "policy_revision" : { - "type" : "integer" - }, - "sent_at" : { - "type" : "date" - }, - "type" : { - "type" : "keyword" - } - } - }, - "fleet-agent-events" : { - "properties" : { - "action_id" : { - "type" : "keyword" - }, - "agent_id" : { - "type" : "keyword" - }, - "data" : { - "type" : "text" - }, - "message" : { - "type" : "text" - }, - "payload" : { - "type" : "text" - }, - "policy_id" : { - "type" : "keyword" - }, - "stream_id" : { - "type" : "keyword" - }, - "subtype" : { - "type" : "keyword" - }, - "timestamp" : { - "type" : "date" - }, - "type" : { - "type" : "keyword" - } - } - }, - "fleet-agents" : { - "properties" : { - "access_api_key_id" : { - "type" : "keyword" - }, - "active" : { - "type" : "boolean" - }, - "current_error_events" : { - "type" : "text", - "index" : false - }, - "default_api_key" : { - "type" : "binary" - }, - "default_api_key_id" : { - "type" : "keyword" - }, - "enrolled_at" : { - "type" : "date" - }, - "last_checkin" : { - "type" : "date" - }, - "last_checkin_status" : { - "type" : "keyword" - }, - "last_updated" : { - "type" : "date" - }, - "local_metadata" : { - "type" : "flattened" - }, - "packages" : { - "type" : "keyword" - }, - "policy_id" : { - "type" : "keyword" - }, - "policy_revision" : { - "type" : "integer" - }, - "shared_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "unenrolled_at" : { - "type" : "date" - }, - "unenrollment_started_at" : { - "type" : "date" - }, - "updated_at" : { - "type" : "date" - }, - "upgrade_started_at" : { - "type" : "date" - }, - 
"upgraded_at" : { - "type" : "date" - }, - "user_provided_metadata" : { - "type" : "flattened" - }, - "version" : { - "type" : "keyword" - } - } - }, - "fleet-enrollment-api-keys" : { - "properties" : { - "active" : { - "type" : "boolean" - }, - "api_key" : { - "type" : "binary" - }, - "api_key_id" : { - "type" : "keyword" - }, - "created_at" : { - "type" : "date" - }, - "expire_at" : { - "type" : "date" - }, - "name" : { - "type" : "keyword" - }, - "policy_id" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - } - } - }, - "graph-workspace" : { - "properties" : { - "description" : { - "type" : "text" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text" - } - } - }, - "numLinks" : { - "type" : "integer" - }, - "numVertices" : { - "type" : "integer" - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - }, - "wsState" : { - "type" : "text" - } - } - }, - "index-pattern" : { - "dynamic" : "false", - "properties" : { - "title" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - } - } - }, - "infrastructure-ui-source" : { - "type" : "object", - "dynamic" : "false" - }, - "ingest-agent-policies" : { - "properties" : { - "description" : { - "type" : "text" - }, - "is_default" : { - "type" : "boolean" - }, - "monitoring_enabled" : { - "type" : "keyword", - "index" : false - }, - "name" : { - "type" : "keyword" - }, - "namespace" : { - "type" : "keyword" - }, - "package_policies" : { - "type" : "keyword" - }, - "revision" : { - "type" : "integer" - }, - "status" : { - "type" : "keyword" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "type" : "keyword" - } - } - }, - "ingest-outputs" : { - "properties" : { - "ca_sha256" : { - "type" : "keyword", - "index" : false - }, - "config" : { - "type" : "flattened" - }, - "config_yaml" : { - "type" : "text" - }, - "fleet_enroll_password" : { - "type" : "binary" - }, - "fleet_enroll_username" : { - "type" : "binary" - }, - "hosts" : { - "type" : "keyword" - }, - "is_default" : { - "type" : "boolean" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "ingest-package-policies" : { - "properties" : { - "created_at" : { - "type" : "date" - }, - "created_by" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "enabled" : { - "type" : "boolean" - }, - "inputs" : { - "type" : "nested", - "enabled" : false, - "properties" : { - "config" : { - "type" : "flattened" - }, - "enabled" : { - "type" : "boolean" - }, - "streams" : { - "type" : "nested", - "properties" : { - "compiled_stream" : { - "type" : "flattened" - }, - "config" : { - "type" : "flattened" - }, - "data_stream" : { - "properties" : { - "dataset" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "enabled" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - "vars" : { - "type" : "flattened" - } - } - }, - "type" : { - "type" : "keyword" - }, - "vars" : { - "type" : "flattened" - } - } - }, - "name" : { - "type" : "keyword" - }, - "namespace" : { - "type" : "keyword" - }, - "output_id" : { - "type" : "keyword" - }, - "package" : { - "properties" : { - "name" : { - "type" : "keyword" - }, - "title" : { - "type" : "keyword" - }, - "version" : { - "type" : "keyword" - } - } - }, - "policy_id" : { - "type" : "keyword" - }, - "revision" : { - "type" : "integer" - }, - "updated_at" : { - "type" : "date" - }, - "updated_by" : { - "type" : "keyword" - 
} - } - }, - "ingest_manager_settings" : { - "properties" : { - "agent_auto_upgrade" : { - "type" : "keyword" - }, - "has_seen_add_data_notice" : { - "type" : "boolean", - "index" : false - }, - "kibana_ca_sha256" : { - "type" : "keyword" - }, - "kibana_urls" : { - "type" : "keyword" - }, - "package_auto_upgrade" : { - "type" : "keyword" - } - } - }, - "inventory-view" : { - "type" : "object", - "dynamic" : "false" - }, - "kql-telemetry" : { - "properties" : { - "optInCount" : { - "type" : "long" - }, - "optOutCount" : { - "type" : "long" - } - } - }, - "lens" : { - "properties" : { - "description" : { - "type" : "text" - }, - "expression" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "state" : { - "type" : "flattened" - }, - "title" : { - "type" : "text" - }, - "visualizationType" : { - "type" : "keyword" - } - } - }, - "lens-ui-telemetry" : { - "properties" : { - "count" : { - "type" : "integer" - }, - "date" : { - "type" : "date" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "map" : { - "properties" : { - "description" : { - "type" : "text" - }, - "layerListJSON" : { - "type" : "text" - }, - "mapStateJSON" : { - "type" : "text" - }, - "title" : { - "type" : "text" - }, - "uiStateJSON" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "maps-telemetry" : { - "type" : "object", - "enabled" : false - }, - "metrics-explorer-view" : { - "type" : "object", - "dynamic" : "false" - }, - "migrationVersion" : { - "dynamic" : "true", - "properties" : { - "config" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "space" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - } - } - }, - "ml-job" : { - "properties" : { - "datafeed_id" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "job_id" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword" - } - } - }, - "type" : { - "type" : "keyword" - } - } - }, - "ml-telemetry" : { - "properties" : { - "file_data_visualizer" : { - "properties" : { - "index_creation_count" : { - "type" : "long" - } - } - } - } - }, - "monitoring-telemetry" : { - "properties" : { - "reportedClusterUuids" : { - "type" : "keyword" - } - } - }, - "namespace" : { - "type" : "keyword" - }, - "namespaces" : { - "type" : "keyword" - }, - "originId" : { - "type" : "keyword" - }, - "query" : { - "properties" : { - "description" : { - "type" : "text" - }, - "filters" : { - "type" : "object", - "enabled" : false - }, - "query" : { - "properties" : { - "language" : { - "type" : "keyword" - }, - "query" : { - "type" : "keyword", - "index" : false - } - } - }, - "timefilter" : { - "type" : "object", - "enabled" : false - }, - "title" : { - "type" : "text" - } - } - }, - "references" : { - "type" : "nested", - "properties" : { - "id" : { - "type" : "keyword" - }, - "name" : { - "type" : "keyword" - }, - "type" : { - "type" : "keyword" - } - } - }, - "sample-data-telemetry" : { - "properties" : { - "installCount" : { - "type" : "long" - }, - "unInstallCount" : { - "type" : "long" - } - } - }, - "search" : { - "properties" : { - "columns" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer", - "index" : false, - "doc_values" : false - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - 
"type" : "text", - "index" : false - } - } - }, - "sort" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "search-telemetry" : { - "type" : "object", - "dynamic" : "false" - }, - "siem-detection-engine-rule-actions" : { - "properties" : { - "actions" : { - "properties" : { - "action_type_id" : { - "type" : "keyword" - }, - "group" : { - "type" : "keyword" - }, - "id" : { - "type" : "keyword" - }, - "params" : { - "type" : "object", - "enabled" : false - } - } - }, - "alertThrottle" : { - "type" : "keyword" - }, - "ruleAlertId" : { - "type" : "keyword" - }, - "ruleThrottle" : { - "type" : "keyword" - } - } - }, - "siem-detection-engine-rule-status" : { - "properties" : { - "alertId" : { - "type" : "keyword" - }, - "bulkCreateTimeDurations" : { - "type" : "float" - }, - "gap" : { - "type" : "text" - }, - "lastFailureAt" : { - "type" : "date" - }, - "lastFailureMessage" : { - "type" : "text" - }, - "lastLookBackDate" : { - "type" : "date" - }, - "lastSuccessAt" : { - "type" : "date" - }, - "lastSuccessMessage" : { - "type" : "text" - }, - "searchAfterTimeDurations" : { - "type" : "float" - }, - "status" : { - "type" : "keyword" - }, - "statusDate" : { - "type" : "date" - } - } - }, - "siem-ui-timeline" : { - "properties" : { - "columns" : { - "properties" : { - "aggregatable" : { - "type" : "boolean" - }, - "category" : { - "type" : "keyword" - }, - "columnHeaderType" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "example" : { - "type" : "text" - }, - "id" : { - "type" : "keyword" - }, - "indexes" : { - "type" : "keyword" - }, - "name" : { - "type" : "text" - }, - "placeholder" : { - "type" : "text" - }, - "searchable" : { - "type" : "boolean" - }, - "type" : { - "type" : "keyword" - } - } - }, - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "dataProviders" : { - "properties" : { - "and" : { - "properties" : { - "enabled" : { - "type" : "boolean" - }, - "excluded" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - "kqlQuery" : { - "type" : "text" - }, - "name" : { - "type" : "text" - }, - "queryMatch" : { - "properties" : { - "displayField" : { - "type" : "text" - }, - "displayValue" : { - "type" : "text" - }, - "field" : { - "type" : "text" - }, - "operator" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "type" : { - "type" : "text" - } - } - }, - "enabled" : { - "type" : "boolean" - }, - "excluded" : { - "type" : "boolean" - }, - "id" : { - "type" : "keyword" - }, - "kqlQuery" : { - "type" : "text" - }, - "name" : { - "type" : "text" - }, - "queryMatch" : { - "properties" : { - "displayField" : { - "type" : "text" - }, - "displayValue" : { - "type" : "text" - }, - "field" : { - "type" : "text" - }, - "operator" : { - "type" : "text" - }, - "value" : { - "type" : "text" - } - } - }, - "type" : { - "type" : "text" - } - } - }, - "dateRange" : { - "properties" : { - "end" : { - "type" : "date" - }, - "start" : { - "type" : "date" - } - } - }, - "description" : { - "type" : "text" - }, - "eventType" : { - "type" : "keyword" - }, - "excludedRowRendererIds" : { - "type" : "text" - }, - "favorite" : { - "properties" : { - "favoriteDate" : { - "type" : "date" - }, - "fullName" : { - "type" : "text" - }, - "keySearch" : { - "type" : "text" - }, - "userName" : { - "type" : "text" - } - } - }, - "filters" : { - "properties" : { - "exists" : { - "type" : "text" - }, - "match_all" : { - 
"type" : "text" - }, - "meta" : { - "properties" : { - "alias" : { - "type" : "text" - }, - "controlledBy" : { - "type" : "text" - }, - "disabled" : { - "type" : "boolean" - }, - "field" : { - "type" : "text" - }, - "formattedValue" : { - "type" : "text" - }, - "index" : { - "type" : "keyword" - }, - "key" : { - "type" : "keyword" - }, - "negate" : { - "type" : "boolean" - }, - "params" : { - "type" : "text" - }, - "type" : { - "type" : "keyword" - }, - "value" : { - "type" : "text" - } - } - }, - "missing" : { - "type" : "text" - }, - "query" : { - "type" : "text" - }, - "range" : { - "type" : "text" - }, - "script" : { - "type" : "text" - } - } - }, - "indexNames" : { - "type" : "text" - }, - "kqlMode" : { - "type" : "keyword" - }, - "kqlQuery" : { - "properties" : { - "filterQuery" : { - "properties" : { - "kuery" : { - "properties" : { - "expression" : { - "type" : "text" - }, - "kind" : { - "type" : "keyword" - } - } - }, - "serializedQuery" : { - "type" : "text" - } - } - } - } - }, - "savedQueryId" : { - "type" : "keyword" - }, - "sort" : { - "properties" : { - "columnId" : { - "type" : "keyword" - }, - "sortDirection" : { - "type" : "keyword" - } - } - }, - "status" : { - "type" : "keyword" - }, - "templateTimelineId" : { - "type" : "text" - }, - "templateTimelineVersion" : { - "type" : "integer" - }, - "timelineType" : { - "type" : "keyword" - }, - "title" : { - "type" : "text" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "siem-ui-timeline-note" : { - "properties" : { - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "eventId" : { - "type" : "keyword" - }, - "note" : { - "type" : "text" - }, - "timelineId" : { - "type" : "keyword" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "siem-ui-timeline-pinned-event" : { - "properties" : { - "created" : { - "type" : "date" - }, - "createdBy" : { - "type" : "text" - }, - "eventId" : { - "type" : "keyword" - }, - "timelineId" : { - "type" : "keyword" - }, - "updated" : { - "type" : "date" - }, - "updatedBy" : { - "type" : "text" - } - } - }, - "space" : { - "properties" : { - "_reserved" : { - "type" : "boolean" - }, - "color" : { - "type" : "keyword" - }, - "description" : { - "type" : "text" - }, - "disabledFeatures" : { - "type" : "keyword" - }, - "imageUrl" : { - "type" : "text", - "index" : false - }, - "initials" : { - "type" : "keyword" - }, - "name" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 2048 - } - } - } - } - }, - "tag" : { - "properties" : { - "color" : { - "type" : "text" - }, - "description" : { - "type" : "text" - }, - "name" : { - "type" : "text" - } - } - }, - "telemetry" : { - "properties" : { - "allowChangingOptInStatus" : { - "type" : "boolean" - }, - "enabled" : { - "type" : "boolean" - }, - "lastReported" : { - "type" : "date" - }, - "lastVersionChecked" : { - "type" : "keyword" - }, - "reportFailureCount" : { - "type" : "integer" - }, - "reportFailureVersion" : { - "type" : "keyword" - }, - "sendUsageFrom" : { - "type" : "keyword" - }, - "userHasSeenNotice" : { - "type" : "boolean" - } - } - }, - "timelion-sheet" : { - "properties" : { - "description" : { - "type" : "text" - }, - "hits" : { - "type" : "integer" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text" - } - } - }, - "timelion_chart_height" : { - "type" : "integer" - }, - "timelion_columns" : { - "type" : "integer" - }, - 
"timelion_interval" : { - "type" : "keyword" - }, - "timelion_other_interval" : { - "type" : "keyword" - }, - "timelion_rows" : { - "type" : "integer" - }, - "timelion_sheet" : { - "type" : "text" - }, - "title" : { - "type" : "text" - }, - "version" : { - "type" : "integer" - } - } - }, - "todo" : { - "properties" : { - "icon" : { - "type" : "keyword" - }, - "task" : { - "type" : "text" - }, - "title" : { - "type" : "keyword" - } - } - }, - "tsvb-validation-telemetry" : { - "properties" : { - "failedRequests" : { - "type" : "long" - } - } - }, - "type" : { - "type" : "keyword" - }, - "ui-metric" : { - "properties" : { - "count" : { - "type" : "integer" - } - } - }, - "updated_at" : { - "type" : "date" - }, - "upgrade-assistant-reindex-operation" : { - "properties" : { - "errorMessage" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "indexName" : { - "type" : "keyword" - }, - "lastCompletedStep" : { - "type" : "long" - }, - "locked" : { - "type" : "date" - }, - "newIndexName" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "reindexOptions" : { - "properties" : { - "openAndClose" : { - "type" : "boolean" - }, - "queueSettings" : { - "properties" : { - "queuedAt" : { - "type" : "long" - }, - "startedAt" : { - "type" : "long" - } - } - } - } - }, - "reindexTaskId" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 256 - } - } - }, - "reindexTaskPercComplete" : { - "type" : "float" - }, - "runningReindexCount" : { - "type" : "integer" - }, - "status" : { - "type" : "integer" - } - } - }, - "upgrade-assistant-telemetry" : { - "properties" : { - "features" : { - "properties" : { - "deprecation_logging" : { - "properties" : { - "enabled" : { - "type" : "boolean", - "null_value" : true - } - } - } - } - }, - "ui_open" : { - "properties" : { - "cluster" : { - "type" : "long", - "null_value" : 0 - }, - "indices" : { - "type" : "long", - "null_value" : 0 - }, - "overview" : { - "type" : "long", - "null_value" : 0 - } - } - }, - "ui_reindex" : { - "properties" : { - "close" : { - "type" : "long", - "null_value" : 0 - }, - "open" : { - "type" : "long", - "null_value" : 0 - }, - "start" : { - "type" : "long", - "null_value" : 0 - }, - "stop" : { - "type" : "long", - "null_value" : 0 - } - } - } - } - }, - "uptime-dynamic-settings" : { - "type" : "object", - "dynamic" : "false" - }, - "url" : { - "properties" : { - "accessCount" : { - "type" : "long" - }, - "accessDate" : { - "type" : "date" - }, - "createDate" : { - "type" : "date" - }, - "url" : { - "type" : "text", - "fields" : { - "keyword" : { - "type" : "keyword", - "ignore_above" : 2048 - } - } - } - } - }, - "visualization" : { - "properties" : { - "description" : { - "type" : "text" - }, - "kibanaSavedObjectMeta" : { - "properties" : { - "searchSourceJSON" : { - "type" : "text", - "index" : false - } - } - }, - "savedSearchRefName" : { - "type" : "keyword", - "index" : false, - "doc_values" : false - }, - "title" : { - "type" : "text" - }, - "uiStateJSON" : { - "type" : "text", - "index" : false - }, - "version" : { - "type" : "integer" - }, - "visState" : { - "type" : "text", - "index" : false - } - } - }, - "workplace_search_telemetry" : { - "type" : "object", - "dynamic" : "false" - } - } - }` diff --git a/internal/pkg/testing/esutil/bootstrap.go b/internal/pkg/testing/esutil/bootstrap.go index f8242971b..6b7c01f34 100644 --- a/internal/pkg/testing/esutil/bootstrap.go 
+++ b/internal/pkg/testing/esutil/bootstrap.go
@@ -7,60 +7,10 @@ package esutil
 import (
 	"context"
-	"github.com/elastic/fleet-server/v7/internal/pkg/es"
 	"github.com/elastic/go-elasticsearch/v8"
 )
-// Temporary ES indices bootstrapping until we move this logic to a proper place
-// The plans at the moment possibly handle at ES plugin
-
-type indexConfig struct {
-	mapping    string
-	datastream bool
-}
-
-var indexConfigs = map[string]indexConfig{
-	// Commenting out the boostrapping for now here, just in case if it needs to be "enabled" again.
-	// Will remove all the boostrapping code completely later once all is fully integrated
-	".fleet-actions-results": {mapping: es.MappingActionResult, datastream: true},
-}
-
-// Bootstrap creates .fleet-actions data stream
-func EnsureESIndices(ctx context.Context, cli *elasticsearch.Client) error {
-	for name, idxcfg := range indexConfigs {
-		err := EnsureDatastream(ctx, cli, name, idxcfg)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
-}
-
-func EnsureDatastream(ctx context.Context, cli *elasticsearch.Client, name string, idxcfg indexConfig) error {
-	if idxcfg.datastream {
-		err := EnsureILMPolicy(ctx, cli, name)
-		if err != nil {
-			return err
-		}
-	}
-
-	err := EnsureTemplate(ctx, cli, name, idxcfg.mapping, idxcfg.datastream)
-	if err != nil {
-		return err
-	}
-
-	if idxcfg.datastream {
-		err = CreateDatastream(ctx, cli, name)
-	} else {
-		err = CreateIndex(ctx, cli, name)
-	}
-	if err != nil {
-		return err
-	}
-
-	return nil
-}
-
+// EnsureIndex sets up the index if it doesn't exists, utilized for integration tests at the moment
 func EnsureIndex(ctx context.Context, cli *elasticsearch.Client, name, mapping string) error {
 	err := EnsureTemplate(ctx, cli, name, mapping, false)
 	if err != nil {

From 2a99ed4002762ad21e1ca79407243f40d5b2d57c Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Mon, 17 May 2021 17:30:38 +0000
Subject: [PATCH 090/240] Fix issue with config reloading.
(#365) (#366) (cherry picked from commit 85f829203f74f525505b9abe4492d4d7f4883f56) Co-authored-by: Blake Rouse --- cmd/fleet/main.go | 10 ++++----- fleet-server.yml | 54 +++++++++++++++++++++++------------------------ 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 95b705d5a..a2a87dae1 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -592,19 +592,19 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er bc := checkin.NewBulk(bulker) g.Go(loggedRunFunc(ctx, "Bulk checkin", bc.Run)) - ct := NewCheckinT(f.verCon, &f.cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) - et, err := NewEnrollerT(f.verCon, &f.cfg.Inputs[0].Server, bulker, f.cache) + ct := NewCheckinT(f.verCon, &cfg.Inputs[0].Server, f.cache, bc, pm, am, ad, tr, bulker) + et, err := NewEnrollerT(f.verCon, &cfg.Inputs[0].Server, bulker, f.cache) if err != nil { return err } - at := NewArtifactT(&f.cfg.Inputs[0].Server, bulker, f.cache) - ack := NewAckT(&f.cfg.Inputs[0].Server, bulker, f.cache) + at := NewArtifactT(&cfg.Inputs[0].Server, bulker, f.cache) + ack := NewAckT(&cfg.Inputs[0].Server, bulker, f.cache) router := NewRouter(bulker, ct, et, at, ack, sm) g.Go(loggedRunFunc(ctx, "Http server", func(ctx context.Context) error { - return runServer(ctx, router, &f.cfg.Inputs[0].Server) + return runServer(ctx, router, &cfg.Inputs[0].Server) })) return g.Wait() diff --git a/fleet-server.yml b/fleet-server.yml index 774009c67..44bc1c368 100644 --- a/fleet-server.yml +++ b/fleet-server.yml @@ -17,36 +17,36 @@ fleet: # server: # host: localhost # port: 8220 +# timeouts: +# checkin_long_poll: 300s # long poll timeout +# profiler: +# enabled: true # enable profiler +# limits: +# policy_throttle: 100ms +# max_connetions: 150 +# checkin_limit: +# interval: 100ms +# burst: 25 +# max: 100 +# artifact_limit: +# interval: 10ms +# burst: 5 +# max: 10 +# ack_limit: +# interval: 10ms +# burst: 20 +# max: 10 +# enroll_limit: +# interval: 50ms +# burst: 10 +# max: 8 +# ssl: +# enabled: true +# certificate: /creds/cert.pem +# key: /creds/key.pem # cache: # num_counters: 500000 # 10x times expected count # max_cost: 50 * 1024 * 1024 # 50MiB cache size -# timeouts: -# checkin_long_poll: 300s # long poll timeout -# profiler: -# enabled: true # enable profiler -# limits: -# policy_throttle: 100ms -# max_connetions: 150 -# checkin_limit: -# interval: 100ms -# burst: 25 -# max: 100 -# artifact_limit: -# interval: 10ms -# burst: 5 -# max: 10 -# ack_limit: -# interval: 10ms -# burst: 20 -# max: 10 -# enroll_limit: -# interval: 50ms -# burst: 10 -# max: 8 -# ssl: -# enabled: true -# certificate: /creds/cert.pem -# key: /creds/key.pem logging: to_stderr: true # Force the logging output to stderr From 6b6587819fd660dc3f67e224d81b9c323dd79ad1 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 17 May 2021 22:39:41 +0000 Subject: [PATCH 091/240] Conform error and hash fields to ECS. 
(#363) (cherry picked from commit 4d87c82e28a93e8a7e5b86252e7bd8c1a23c4297) Co-authored-by: Sean Cunningham Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- cmd/fleet/handleCheckin.go | 2 +- internal/pkg/logger/logger.go | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index b53ab2110..d9947235a 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -400,7 +400,7 @@ func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId string, pp *po } zlog.Info(). - Str("hash", defaultRole.Sha2). + Str("hash.sha256", defaultRole.Sha2). Str("apiKeyId", defaultOutputApiKey.Id). Msg("Updating agent record to pick up default output key.") diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index be6f0d7c5..7ea38f75a 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -80,9 +80,11 @@ func Init(cfg *config.Config) (*Logger, error) { // override the field names for ECS zerolog.LevelFieldName = "log.level" + zerolog.ErrorFieldName = "error.message" zerolog.MessageFieldName = "message" zerolog.TimeFieldFormat = "2006-01-02T15:04:05.999Z" // RFC3339 at millisecond resolution in zulu timezone zerolog.TimestampFieldName = "@timestamp" + if !cfg.Logging.Pretty || !cfg.Logging.ToStderr { zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } } From 86f83a7a5541f2895130450f9d53122dc2fc565a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 18 May 2021 01:15:56 -0400 Subject: [PATCH 092/240] [Automation] Update elastic stack version to 7.14.0-c4ef352d for testing (#369) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 6eeddba19..0f0b73162 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-c3a960d1-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-c4ef352d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f11fc6e72bc070884eaf19d5a0ec3dc96cbb4437 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 19 May 2021 01:14:36 -0400 Subject: [PATCH 093/240] [Automation] Update elastic stack version to 7.14.0-406b0128 for testing (#372) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 0f0b73162..ec15cf25b 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-c4ef352d-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-406b0128-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 7701f2fb742d6a43b6b21f27bd53f51bf7d0fc85 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 20 May 2021 01:14:29 -0400 Subject: [PATCH 094/240] [Automation] Update elastic stack version to 7.14.0-b278d172 for testing (#377) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index ec15cf25b..e99dd36b4 100644 --- 
a/dev-tools/integration/.env
+++ b/dev-tools/integration/.env
@@ -1,4 +1,4 @@
-ELASTICSEARCH_VERSION=7.14.0-406b0128-SNAPSHOT
+ELASTICSEARCH_VERSION=7.14.0-b278d172-SNAPSHOT
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=changeme
 TEST_ELASTICSEARCH_HOSTS=localhost:9200
\ No newline at end of file

From 34458e33a6277d821d3e5d27c02ffd3dda8afda2 Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Thu, 20 May 2021 15:13:04 +0000
Subject: [PATCH 095/240] Send owner: true on API call to invalid API keys. (#381) (#382)

(cherry picked from commit ce3101876a388bda06ed896de8e143f978602f79)

Co-authored-by: Blake Rouse
---
 internal/pkg/apikey/invalidate.go | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go
index 2e47ea6df..1a4b0abde 100644
--- a/internal/pkg/apikey/invalidate.go
+++ b/internal/pkg/apikey/invalidate.go
@@ -18,9 +18,11 @@ import (
 func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string) error {

 	payload := struct {
-		IDs []string `json:"ids,omitempty"`
+		IDs   []string `json:"ids,omitempty"`
+		Owner bool     `json:"owner"`
 	}{
 		ids,
+		true,
 	}

 	body, err := json.Marshal(&payload)

From 4782323627e935af1345cfacdb6ffd688ff3b955 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Fri, 21 May 2021 01:16:04 -0400
Subject: [PATCH 096/240] [Automation] Update elastic stack version to 7.14.0-a9c3399b for testing (#384)

Co-authored-by: apmmachine
---
 dev-tools/integration/.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env
index e99dd36b4..dfaf6c562 100644
--- a/dev-tools/integration/.env
+++ b/dev-tools/integration/.env
@@ -1,4 +1,4 @@
-ELASTICSEARCH_VERSION=7.14.0-b278d172-SNAPSHOT
+ELASTICSEARCH_VERSION=7.14.0-a9c3399b-SNAPSHOT
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=changeme
 TEST_ELASTICSEARCH_HOSTS=localhost:9200
\ No newline at end of file

From b12d8bdfd75126032da2924d57d0b4ae453e6117 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Mon, 24 May 2021 01:17:06 -0400
Subject: [PATCH 097/240] [Automation] Update elastic stack version to 7.14.0-113d5d66 for testing (#389)

Co-authored-by: apmmachine
---
 dev-tools/integration/.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env
index dfaf6c562..8e08d8fb4 100644
--- a/dev-tools/integration/.env
+++ b/dev-tools/integration/.env
@@ -1,4 +1,4 @@
-ELASTICSEARCH_VERSION=7.14.0-a9c3399b-SNAPSHOT
+ELASTICSEARCH_VERSION=7.14.0-113d5d66-SNAPSHOT
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=changeme
 TEST_ELASTICSEARCH_HOSTS=localhost:9200
\ No newline at end of file

From 64b6589938ce39bbfa25aff5c6196ffc23eb63e6 Mon Sep 17 00:00:00 2001
From: Sean Cunningham
Date: Wed, 19 May 2021 05:45:15 -0400
Subject: [PATCH 098/240] Log version and commit at boot

---
 Makefile | 3 ++-
 cmd/fleet/main.go | 29 ++++++++++++++++++++++++-----
 internal/pkg/logger/logger.go | 8 --------
 main.go | 3 ++-
 4 files changed, 28 insertions(+), 15 deletions(-)

diff --git a/Makefile b/Makefile
index f7be55144..a679760e5 100644
--- a/Makefile
+++ b/Makefile
@@ -18,7 +18,8 @@ VERSION=${DEFAULT_VERSION}
 endif

 PLATFORM_TARGETS=$(addprefix release-, $(PLATFORMS))
-LDFLAGS=-w -s -X main.Version=${VERSION}
+COMMIT=$(shell git rev-parse --short HEAD)
+LDFLAGS=-w -s -X 
main.Version=${VERSION} -X main.Commit=${COMMIT} CMD_COLOR_ON=\033[32m\xE2\x9c\x93 CMD_COLOR_OFF=\033[0m diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index a2a87dae1..3c619da54 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -68,7 +68,26 @@ func makeCache(cfg *config.Config) (cache.Cache, error) { return cache.New(cacheCfg) } -func getRunCommand(version string) func(cmd *cobra.Command, args []string) error { +func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, error) { + l, err := logger.Init(cfg) + if err != nil { + return nil, err + } + + log.Info(). + Str("version", version). + Str("commit", commit). + Int("pid", os.Getpid()). + Int("ppid", os.Getppid()). + Str("exe", os.Args[0]). + Strs("args", os.Args[1:]). + Msg("boot") + log.Debug().Strs("env", os.Environ()).Msg("environment") + + return l, err +} + +func getRunCommand(version, commit string) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { cfgObject := cmd.Flags().Lookup("E").Value.(*config.Flag) cliCfg := cfgObject.Config() @@ -85,7 +104,7 @@ func getRunCommand(version string) func(cmd *cobra.Command, args []string) error if err != nil { return err } - l, err = logger.Init(cfg) + l, err = initLogger(cfg, version, commit) if err != nil { return err } @@ -119,7 +138,7 @@ func getRunCommand(version string) func(cmd *cobra.Command, args []string) error return err } - l, err = logger.Init(cfg) + l, err = initLogger(cfg, version, commit) if err != nil { return err } @@ -147,11 +166,11 @@ func getRunCommand(version string) func(cmd *cobra.Command, args []string) error } } -func NewCommand(version string) *cobra.Command { +func NewCommand(version, commit string) *cobra.Command { cmd := &cobra.Command{ Use: "fleet-server", Short: "Fleet Server controls a fleet of Elastic Agents", - RunE: getRunCommand(version), + RunE: getRunCommand(version, commit), } cmd.Flags().StringP("config", "c", "fleet-server.yml", "Configuration for Fleet Server") cmd.Flags().Bool(kAgentMode, false, "Running under execution of the Elastic Agent") diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index 7ea38f75a..420b08a6a 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -88,14 +88,6 @@ func Init(cfg *config.Config) (*Logger, error) { if !cfg.Logging.Pretty || !cfg.Logging.ToStderr { zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } } - - log.Info(). - Int("pid", os.Getpid()). - Int("ppid", os.Getppid()). - Str("exe", os.Args[0]). - Strs("args", os.Args[1:]). 
- Msg("boot") - log.Debug().Strs("env", os.Environ()).Msg("environment") }) return gLogger, err } diff --git a/main.go b/main.go index e3932fc2a..8b06606a5 100644 --- a/main.go +++ b/main.go @@ -23,10 +23,11 @@ const defaultVersion = "7.14.0" var ( Version string = defaultVersion + Commit string ) func main() { - cmd := fleet.NewCommand(Version) + cmd := fleet.NewCommand(Version, Commit) if err := cmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) From 7c67d1815e9c9fc23fca0ef304740deea0c51edb Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 26 May 2021 01:16:31 -0400 Subject: [PATCH 099/240] [Automation] Update elastic stack version to 7.14.0-9eca24b7 for testing (#395) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 8e08d8fb4..1b068d3b8 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-113d5d66-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-9eca24b7-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 7c29697c2ee004a13e12c67887b3092f60dd440f Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 26 May 2021 17:02:29 +0000 Subject: [PATCH 100/240] Update to Go version 1.16.4 (#341) (#397) * Update to Go version 1.16.4 * Add MacOSX version note to readme (cherry picked from commit 75d82420e4dc43a2ccac5228debc995c6482e1a5) # Conflicts: # README.md Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> --- .go-version | 2 +- README.md | 26 ++++++++++++++++++++++++-- go.mod | 2 +- 3 files changed, 26 insertions(+), 4 deletions(-) diff --git a/.go-version b/.go-version index 4a02d2c31..a23207367 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.16.2 +1.16.4 diff --git a/README.md b/README.md index 0c8325e59..d3131d93c 100644 --- a/README.md +++ b/README.md @@ -31,7 +31,7 @@ yarn es snapshot -E xpack.security.authc.api_key.enabled=true yarn start --no-base-path ``` -As soon as all is running, go to `http://localhost:5601`, enter `elastic/changeme` as credential and navigate to Fleet. Trigger the Fleet setup. As soon as this is completed, copy the `policy id` and `enrollment token` for the fleet-server policy. The policy id can be copied from the URL, the enrollment token can be found in the Enrollment Token list. +As soon as all is running, go to `http://localhost:5601`, enter `elastic/changeme` as credential and navigate to Fleet. Trigger the Fleet setup. As soon as this is completed, copy the `policy id` and `enrollment token` for the fleet-server policy. The policy id can be copied from the URL, the enrollment token can be found in the Enrollment Token list. NOTE: This step can be skipped if the full command below for the Elastic Agent is used. @@ -79,4 +79,26 @@ Replace {YOUR-IP} with the IP address of your machine. ## fleet-server repo -By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. 
\ No newline at end of file +<<<<<<< HEAD +By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. +======= +By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. + + +## Compatbility and upgrades + +Fleet server is always on the exact same version as Elastic Agent running fleet-server. Any Elastic Agent enrolling into a fleet-server must be the same version or older. Fleet-server communicates with Elasticsearch. Elasticsearch must be on the same version or newer. For Kibana it is assumed it is on the same version as Elasticsearch. With this the compatibility looks as following: + +``` +Elastic Agent <= Elastic Agent with fleet-server) <= Elasticsearch / Kibana +``` + +There might be differences on the bugfix version. + +If an upgrade is done, Elasticsearch / Kibana have to be upgraded first, then Elastic Agent with fleet-server and last the Elastic Agents. + +## MacOSX Version + +The [golang-crossbuild](https://github.com/elastic/golang-crossbuild) produces images used for testing/building. +The `golang-crossbuild:1.16.4-darwin-debian10` image expects the minimum MacOSX version to be 10.14+. +>>>>>>> 75d8242 (Update to Go version 1.16.4 (#341)) diff --git a/go.mod b/go.mod index fbde00dfc..07ebe486f 100644 --- a/go.mod +++ b/go.mod @@ -1,6 +1,6 @@ module github.com/elastic/fleet-server/v7 -go 1.15 +go 1.16 require ( github.com/Pallinder/go-randomdata v1.2.0 From 2e688f02e4356f2b35c82a66bf19d210bafa7209 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 27 May 2021 01:15:23 -0400 Subject: [PATCH 101/240] [Automation] Update elastic stack version to 7.14.0-f385fee6 for testing (#398) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 1b068d3b8..0e50963d5 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-9eca24b7-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-f385fee6-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From b364bf6b74c4fdf7970ea3e5b0812228883893f6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 27 May 2021 20:48:12 +0000 Subject: [PATCH 102/240] Add context to errors for better visibility in logs. Add http access logging in debug mode. 
(#403) (cherry picked from commit d72fcdd6d9c0707fef43918c2398b741cef77130) Co-authored-by: Sean Cunningham --- cmd/fleet/auth.go | 22 ++-- cmd/fleet/error.go | 136 ++++++++++++++++++++++-- cmd/fleet/handleAck.go | 46 +++++---- cmd/fleet/handleArtifacts.go | 33 +++--- cmd/fleet/handleCheckin.go | 83 ++++++++++----- cmd/fleet/handleEnroll.go | 42 +++++--- cmd/fleet/handleStatus.go | 12 ++- cmd/fleet/metrics.go | 90 +++------------- cmd/fleet/router.go | 61 +++++++++-- cmd/fleet/schema.go | 5 +- internal/pkg/action/dispatcher.go | 4 +- internal/pkg/apikey/auth.go | 8 +- internal/pkg/apikey/invalidate.go | 5 +- internal/pkg/logger/ecs.go | 32 ++++++ internal/pkg/logger/http.go | 165 ++++++++++++++++++++++++++++++ 15 files changed, 554 insertions(+), 190 deletions(-) create mode 100644 internal/pkg/logger/ecs.go create mode 100644 internal/pkg/logger/http.go diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index 6e9752869..cf147fec3 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -12,6 +12,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/go-elasticsearch/v8" @@ -22,8 +23,10 @@ const ( kAPIKeyTTL = 5 * time.Second ) -var ErrApiKeyNotEnabled = errors.New("APIKey not enabled") -var ErrAgentCorrupted = errors.New("agent record corrupted") +var ( + ErrApiKeyNotEnabled = errors.New("APIKey not enabled") + ErrAgentCorrupted = errors.New("agent record corrupted") +) // This authenticates that the provided API key exists and is enabled. // WARNING: This does not validate that the api key is valid for the Fleet Domain. @@ -39,6 +42,8 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* return key, nil } + reqId := r.Header.Get(logger.HeaderRequestID) + start := time.Now() info, err := key.Authenticate(r.Context(), client) @@ -47,16 +52,18 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* log.Info(). Err(err). Str("id", key.Id). - Dur("rtt", time.Since(start)). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("ApiKey fail authentication") return nil, err } log.Trace(). Str("id", key.Id). - Dur("rtt", time.Since(start)). - Str("UserName", info.UserName). - Strs("Roles", info.Roles). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Str("userName", info.UserName). + Strs("roles", info.Roles). Bool("enabled", info.Enabled). RawJSON("meta", info.Metadata). Msg("ApiKey authenticated") @@ -68,7 +75,8 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* log.Info(). Err(err). Str("id", key.Id). - Dur("rtt", time.Since(start)). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
Msg("ApiKey not enabled") } diff --git a/cmd/fleet/error.go b/cmd/fleet/error.go index c51112449..5f92fe15e 100644 --- a/cmd/fleet/error.go +++ b/cmd/fleet/error.go @@ -5,24 +5,144 @@ package fleet import ( + "context" "encoding/json" "net/http" + + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/pkg/errors" + "github.com/rs/zerolog" +) + +// Alias useful ECS headers +const ( + EcsHttpRequestId = logger.EcsHttpRequestId + EcsEventDuration = logger.EcsEventDuration + EcsHttpResponseCode = logger.EcsHttpResponseCode + EcsHttpResponseBodyBytes = logger.EcsHttpResponseBodyBytes ) type errResp struct { - StatusCode int `json:"statusCode"` - Error string `json:"error"` - Message string `json:"message"` + StatusCode int `json:"statusCode"` + Error string `json:"error"` + Message string `json:"message,omitempty"` + Level zerolog.Level `json:"-"` +} + +func NewErrorResp(err error) errResp { + + errTable := []struct { + target error + meta errResp + }{ + { + ErrAgentNotFound, + errResp{ + http.StatusNotFound, + "AgentNotFound", + "agent could not be found", + zerolog.WarnLevel, + }, + }, + { + limit.ErrRateLimit, + errResp{ + http.StatusTooManyRequests, + "RateLimit", + "exceeded the rate limit", + zerolog.DebugLevel, + }, + }, + { + limit.ErrMaxLimit, + errResp{ + http.StatusTooManyRequests, + "MaxLimit", + "exceeded the max limit", + zerolog.DebugLevel, + }, + }, + { + ErrApiKeyNotEnabled, + errResp{ + http.StatusUnauthorized, + "Unauthorized", + "ApiKey not enabled", + zerolog.InfoLevel, + }, + }, + { + context.Canceled, + errResp{ + http.StatusServiceUnavailable, + "ServiceUnavailable", + "server is stopping", + zerolog.DebugLevel, + }, + }, + { + ErrInvalidUserAgent, + errResp{ + http.StatusBadRequest, + "InvalidUserAgent", + "user-agent is invalid", + zerolog.InfoLevel, + }, + }, + { + ErrUnsupportedVersion, + errResp{ + http.StatusBadRequest, + "UnsupportedVersion", + "version is not supported", + zerolog.InfoLevel, + }, + }, + { + dl.ErrNotFound, + errResp{ + http.StatusNotFound, + "NotFound", + "not found", + zerolog.WarnLevel, + }, + }, + { + ErrorThrottle, + errResp{ + http.StatusTooManyRequests, + "TooManyRequests", + "too many requests", + zerolog.DebugLevel, + }, + }, + } + + for _, e := range errTable { + if errors.Is(err, e.target) { + return e.meta + } + } + + // Default + return errResp{ + StatusCode: http.StatusBadRequest, + Error: "BadRequest", + Level: zerolog.InfoLevel, + } } -func WriteError(w http.ResponseWriter, code int, errStr string, msg string) error { - data, err := json.Marshal(&errResp{StatusCode: code, Error: errStr, Message: msg}) +func (er errResp) Write(w http.ResponseWriter) error { + data, err := json.Marshal(&er) if err != nil { return err } w.Header().Set("Content-Type", "application/json; charset=utf-8") w.Header().Set("X-Content-Type-Options", "nosniff") - w.WriteHeader(code) - w.Write(data) - return nil + w.WriteHeader(er.StatusCode) + _, err = w.Write(data) + return err } diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 1cf0e8c00..3e0d5b11b 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -8,7 +8,6 @@ import ( "bytes" "context" "encoding/json" - "errors" "io/ioutil" "net/http" "strconv" @@ -21,8 +20,10 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" 
"github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/policy" + "github.com/pkg/errors" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" @@ -49,20 +50,25 @@ func NewAckT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *AckT { } func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { - id := ps.ByName("id") + start := time.Now() + id := ps.ByName("id") err := rt.ack.handleAcks(w, r, id) if err != nil { - code, str, msg, lvl := cntAcks.IncError(err) + cntAcks.IncError(err) + resp := NewErrorResp(err) + reqId := r.Header.Get(logger.HeaderRequestID) - log.WithLevel(lvl). + log.WithLevel(resp.Level). Err(err). - Int("code", code). - Msg("Fail ACK") + Str(EcsHttpRequestId, reqId). + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail ACK") - if err := WriteError(w, code, str, msg); err != nil { - log.Error().Err(err).Msg("fail writing error response") + if err := resp.Write(w); err != nil { + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail writing error response") } } } @@ -85,14 +91,14 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er raw, err := ioutil.ReadAll(r.Body) if err != nil { - return err + return errors.Wrap(err, "handleAcks read body") } cntAcks.bodyIn.Add(uint64(len(raw))) var req AckRequest if err := json.Unmarshal(raw, &req); err != nil { - return err + return errors.Wrap(err, "handleAcks unmarshal") } log.Trace().RawJSON("raw", raw).Msg("Ack request") @@ -105,7 +111,7 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er data, err := json.Marshal(&resp) if err != nil { - return err + return errors.Wrap(err, "handleAcks marshal response") } var nWritten int @@ -137,7 +143,7 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events if !ok { actions, err := dl.FindAction(ctx, ack.bulk, ev.ActionId) if err != nil { - return err + return errors.Wrap(err, "find actions") } if len(actions) == 0 { return errors.New("no matching action") @@ -156,7 +162,7 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events Error: ev.Error, } if _, err := dl.CreateActionResult(ctx, ack.bulk, acr); err != nil { - return err + return errors.Wrap(err, "create action result") } if ev.Error == "" { @@ -222,14 +228,14 @@ func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, act bulk.WithRetryOnConflict(3), ) - return err + return errors.Wrap(err, "handlePolicyChange update") } func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { apiKeys := _getAPIKeyIDs(agent) if len(apiKeys) > 0 { if err := apikey.Invalidate(ctx, ack.bulk.Client(), apiKeys...); err != nil { - return err + return errors.Wrap(err, "handleUnenroll invalidate apikey") } } @@ -242,10 +248,11 @@ func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { body, err := doc.Marshal() if err != nil { - return err + return errors.Wrap(err, "handleUnenroll marshal") } - return ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) + err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) + return errors.Wrap(err, "handleUnenroll update") } func (ack *AckT) handleUpgrade(ctx context.Context, agent 
*model.Agent) error { @@ -258,10 +265,11 @@ func (ack *AckT) handleUpgrade(ctx context.Context, agent *model.Agent) error { body, err := doc.Marshal() if err != nil { - return err + return errors.Wrap(err, "handleUpgrade marshal") } - return ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) + err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) + return errors.Wrap(err, "handleUpgrade update") } func _getAPIKeyIDs(agent *model.Agent) []string { diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index b7ea7acf8..c95205298 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -11,7 +11,6 @@ import ( "encoding/base64" "encoding/hex" "encoding/json" - "errors" "io" "net/http" "time" @@ -21,10 +20,12 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/throttle" "github.com/julienschmidt/httprouter" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -71,10 +72,13 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http sha2 = ps.ByName("sha2") // DecodedSha256 in the artifact record ) + reqId := r.Header.Get(logger.HeaderRequestID) + zlog := log.With(). Str("id", id). Str("sha2", sha2). Str("remoteAddr", r.RemoteAddr). + Str(EcsHttpRequestId, reqId). Logger() rdr, err := rt.at.handleArtifacts(r, zlog, id, sha2) @@ -84,25 +88,26 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http nWritten, err = io.Copy(w, rdr) zlog.Trace(). Err(err). - Int64("nWritten", nWritten). - Dur("rtt", time.Since(start)). + Int64(EcsHttpResponseBodyBytes, nWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("Response sent") cntArtifacts.bodyOut.Add(uint64(nWritten)) } if err != nil { - code, str, msg, lvl := cntArtifacts.IncError(err) + cntArtifacts.IncError(err) + resp := NewErrorResp(err) - log.WithLevel(lvl). + zlog.WithLevel(resp.Level). Err(err). - Int("code", code). - Int64("nWritten", nWritten). - Dur("rtt", time.Since(start)). - Msg("Fail handle artifact") + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsHttpResponseBodyBytes, nWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail artifact") - if err := WriteError(w, code, str, msg); err != nil { - log.Error().Err(err).Msg("fail writing error response") + if err := resp.Write(w); err != nil { + zlog.Error().Err(err).Msg("fail writing error response") } } } @@ -261,10 +266,10 @@ func (at ArtifactT) fetchArtifact(ctx context.Context, zlog zerolog.Logger, iden zlog.Info(). Err(err). - Dur("rtt", time.Since(start)). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
Msg("fetch artifact") - return artifact, err + return artifact, errors.Wrap(err, "fetchArtifact") } func validateSha2String(sha2 string) error { @@ -283,7 +288,7 @@ func validateSha2String(sha2 string) error { func validateSha2Data(data []byte, sha2 string) error { src, err := hex.DecodeString(sha2) if err != nil { - return err + return errors.Wrap(err, "sha2 hex decode") } sum := sha256.Sum256(data) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index d9947235a..f3a7cca3c 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -10,7 +10,6 @@ import ( "compress/gzip" "context" "encoding/json" - "errors" "net/http" "reflect" "time" @@ -22,6 +21,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/policy" @@ -31,6 +31,7 @@ import ( "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" "github.com/miolini/datacounter" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -42,30 +43,38 @@ var ( ErrFailInjectApiKey = errors.New("fail inject api key") ) -const kEncodingGzip = "gzip" +const ( + kEncodingGzip = "gzip" +) func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httprouter.Params) { + start := time.Now() id := ps.ByName("id") err := rt.ct._handleCheckin(w, r, id, rt.bulker) if err != nil { - code, str, msg, lvl := cntCheckin.IncError(err) + cntCheckin.IncError(err) + resp := NewErrorResp(err) // Log this as warn for visibility that limit has been reached. // This allows customers to tune the configuration on detection of threshold. - if err == limit.ErrMaxLimit { - lvl = zerolog.WarnLevel + if errors.Is(err, limit.ErrMaxLimit) { + resp.Level = zerolog.WarnLevel } - log.WithLevel(lvl). + reqId := r.Header.Get(logger.HeaderRequestID) + + log.WithLevel(resp.Level). Err(err). Str("id", id). - Int("code", code). + Int(EcsHttpResponseCode, resp.StatusCode). + Str(EcsHttpRequestId, reqId). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
Msg("fail checkin") - if err := WriteError(w, code, str, msg); err != nil { - log.Error().Err(err).Msg("fail writing error response") + if err := resp.Write(w); err != nil { + log.Error().Str(EcsHttpRequestId, reqId).Err(err).Msg("fail writing error response") } } } @@ -119,6 +128,8 @@ func NewCheckinT( func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { + reqId := r.Header.Get(logger.HeaderRequestID) + limitF, err := ct.limit.Acquire() if err != nil { return err @@ -154,7 +165,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st cntCheckin.bodyIn.Add(readCounter.Count()) // Compare local_metadata content and update if different - rawMeta, err := parseMeta(agent, &req) + rawMeta, err := parseMeta(agent, reqId, &req) if err != nil { return err } @@ -165,7 +176,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } - // Subsribe to actions dispatcher + // Subscribe to actions dispatcher aSub := ct.ad.Subscribe(agent.Id, seqno) defer ct.ad.Unsubscribe(aSub) actCh := aSub.Ch() @@ -173,7 +184,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st // Subscribe to policy manager for changes on PolicyId > policyRev sub, err := ct.pm.Subscribe(agent.Id, agent.PolicyId, agent.PolicyRevisionIdx, agent.PolicyCoordinatorIdx) if err != nil { - return err + return errors.Wrap(err, "subscribe policy monitor") } defer ct.pm.Unsubscribe(sub) @@ -213,14 +224,14 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st actions = append(actions, acs...) break LOOP case policy := <-sub.Output(): - actionResp, err := processPolicy(ctx, bulker, agent.Id, policy) + actionResp, err := processPolicy(ctx, bulker, agent.Id, reqId, policy) if err != nil { - return err + return errors.Wrap(err, "processPolicy") } actions = append(actions, *actionResp) break LOOP case <-longPoll.C: - log.Trace().Msg("fire long poll") + log.Trace().Str(EcsHttpRequestId, reqId).Str("agentId", agent.Id).Msg("fire long poll") break LOOP case <-tick.C: ct.bc.CheckIn(agent.Id, nil, nil) @@ -241,7 +252,7 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C payload, err := json.Marshal(&resp) if err != nil { - return err + return errors.Wrap(err, "writeResponse marshal") } compressionLevel := ct.cfg.CompressionLevel @@ -253,16 +264,18 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C zipper, err := gzip.NewWriterLevel(wrCounter, compressionLevel) if err != nil { - return err + return errors.Wrap(err, "writeResponse new gzip") } w.Header().Set("Content-Encoding", kEncodingGzip) if _, err = zipper.Write(payload); err != nil { - return err + return errors.Wrap(err, "writeResponse gzip write") } - err = zipper.Close() + if err = zipper.Close(); err != nil { + err = errors.Wrap(err, "writeResponse gzip close") + } cntCheckin.bodyOut.Add(wrCounter.Count()) @@ -276,6 +289,10 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C var nWritten int nWritten, err = w.Write(payload) cntCheckin.bodyOut.Add(uint64(nWritten)) + + if err != nil { + err = errors.Wrap(err, "writeResponse payload") + } } return err @@ -304,6 +321,7 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent log.Debug().Str("token", ackToken).Str("agent_id", agent.Id).Msg("revision token not found") err = nil } else { + err = errors.Wrap(err, "resolveSeqNo") return } } @@ 
-315,12 +333,18 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno sqn.SeqNo, agentId string) ([]model.Action, error) { now := time.Now().UTC().Format(time.RFC3339) - return dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ + actions, err := dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ dl.FieldSeqNo: seqno.Value(), dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint().Value(), dl.FieldExpiration: now, dl.FieldAgents: []string{agentId}, }) + + if err != nil { + return nil, errors.Wrap(err, "fetchAgentPendingActions") + } + + return actions, err } func convertActions(agentId string, actions []model.Action) ([]ActionResp, string) { @@ -350,10 +374,11 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin // - Generate and update default ApiKey if roles have changed. // - Rewrite the policy for delivery to the agent injecting the key material. // -func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId string, pp *policy.ParsedPolicy) (*ActionResp, error) { +func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId, reqId string, pp *policy.ParsedPolicy) (*ActionResp, error) { zlog := log.With(). Str("ctx", "processPolicy"). + Str(EcsHttpRequestId, reqId). Str("agentId", agentId). Str("policyId", pp.Policy.PolicyId). Logger() @@ -506,15 +531,19 @@ func setMapObj(obj map[string]interface{}, val interface{}, keys ...string) bool func findAgentByApiKeyId(ctx context.Context, bulker bulk.Bulk, id string) (*model.Agent, error) { agent, err := dl.FindAgent(ctx, bulker, dl.QueryAgentByAssessAPIKeyID, dl.FieldAccessAPIKeyID, id) - if err != nil && errors.Is(err, dl.ErrNotFound) { - err = ErrAgentNotFound + if err != nil { + if errors.Is(err, dl.ErrNotFound) { + err = ErrAgentNotFound + } else { + err = errors.Wrap(err, "findAgentByApiKeyId") + } } return &agent, err } // parseMeta compares the agent and the request local_metadata content // and returns fields to update the agent record or nil -func parseMeta(agent *model.Agent, req *CheckinRequest) ([]byte, error) { +func parseMeta(agent *model.Agent, reqId string, req *CheckinRequest) ([]byte, error) { // Quick comparison first; compare the JSON payloads. // If the data is not consistently normalized, this short-circuit will not work. @@ -526,7 +555,7 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) ([]byte, error) { // Deserialize the request metadata var reqLocalMeta interface{} if err := json.Unmarshal(req.LocalMeta, &reqLocalMeta); err != nil { - return nil, err + return nil, errors.Wrap(err, "parseMeta request") } // If empty, don't step on existing data @@ -537,7 +566,7 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) ([]byte, error) { // Deserialize the agent's metadata copy var agentLocalMeta interface{} if err := json.Unmarshal(agent.LocalMetadata, &agentLocalMeta); err != nil { - return nil, err + return nil, errors.Wrap(err, "parseMeta local") } var outMeta []byte @@ -547,12 +576,14 @@ func parseMeta(agent *model.Agent, req *CheckinRequest) ([]byte, error) { log.Trace(). Str("agentId", agent.Id). + Str(EcsHttpRequestId, reqId). RawJSON("oldLocalMeta", agent.LocalMetadata). RawJSON("newLocalMeta", req.LocalMeta). Msg("local metadata not equal") log.Info(). Str("agentId", agent.Id). + Str(EcsHttpRequestId, reqId). RawJSON("req.LocalMeta", req.LocalMeta). 
Msg("applying new local metadata") diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 37ac6a9b9..354e45ec9 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -7,7 +7,6 @@ package fleet import ( "context" "encoding/json" - "errors" "fmt" "io" "net/http" @@ -19,6 +18,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" @@ -27,6 +27,7 @@ import ( "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" "github.com/miolini/datacounter" + "github.com/pkg/errors" "github.com/rs/zerolog/log" ) @@ -35,10 +36,15 @@ const ( kCacheAccessInitTTL = time.Second * 30 // Cache a bit longer to handle expensive initial checkin kCacheEnrollmentTTL = time.Second * 30 + + EnrollEphemeral = "EPHEMERAL" + EnrollPermanent = "PERMANENT" + EnrollTemporary = "TEMPORARY" ) var ( - ErrUnknownEnrollType = errors.New("unknown enroll request type") + ErrUnknownEnrollType = errors.New("unknown enroll request type") + ErrInactiveEnrollmentKey = errors.New("inactive enrollment key") ) type EnrollerT struct { @@ -74,34 +80,39 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou data, err := rt.et.handleEnroll(r) + reqId := r.Header.Get(logger.HeaderRequestID) + if err != nil { - code, str, msg, lvl := cntEnroll.IncError(err) + cntEnroll.IncError(err) + resp := NewErrorResp(err) - log.WithLevel(lvl). + log.WithLevel(resp.Level). Err(err). + Str(EcsHttpRequestId, reqId). Str("mod", kEnrollMod). - Int("code", code). - Dur("tdiff", time.Since(start)). - Msg("Enroll fail") + Int(EcsHttpResponseCode, resp.StatusCode). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail enroll") - if err := WriteError(w, code, str, msg); err != nil { - log.Error().Err(err).Msg("fail writing error response") + if err := resp.Write(w); err != nil { + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail writing error response") } return } var numWritten int if numWritten, err = w.Write(data); err != nil { - log.Error().Err(err).Msg("fail send enroll response") + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail send enroll response") } cntEnroll.bodyOut.Add(uint64(numWritten)) log.Trace(). Err(err). + Str(EcsHttpRequestId, reqId). RawJSON("raw", data). Str("mod", kEnrollMod). - Dur("rtt", time.Since(start)). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
Msg("handleEnroll OK") } @@ -309,11 +320,11 @@ func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (* // Pull API key record from .fleet-enrollment-api-keys rec, err := dl.FindEnrollmentAPIKey(ctx, et.bulker, dl.QueryEnrollmentAPIKeyByID, dl.FieldApiKeyID, id) if err != nil { - return nil, err + return nil, errors.Wrap(err, "FindEnrollmentAPIKey") } if !rec.Active { - return nil, fmt.Errorf("record is inactive") + return nil, ErrInactiveEnrollmentKey } cost := int64(len(rec.ApiKey)) @@ -328,13 +339,12 @@ func decodeEnrollRequest(data io.Reader) (*EnrollRequest, error) { var req EnrollRequest decoder := json.NewDecoder(data) if err := decoder.Decode(&req); err != nil { - return nil, err + return nil, errors.Wrap(err, "decode enroll request") } // Validate switch req.Type { - // TODO: Should these be converted to constant? Need to be kept in sync with Kibana? - case "EPHEMERAL", "PERMANENT", "TEMPORARY": + case EnrollEphemeral, EnrollPermanent, EnrollTemporary: default: return nil, ErrUnknownEnrollType } diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go index 51dac23cd..dbd560ef4 100644 --- a/cmd/fleet/handleStatus.go +++ b/cmd/fleet/handleStatus.go @@ -9,13 +9,15 @@ import ( "encoding/json" "net/http" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" ) -func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprouter.Params) { - // Metrics; serenity now. +func (rt Router) handleStatus(w http.ResponseWriter, r *http.Request, _ httprouter.Params) { + dfunc := cntStatus.IncStart() defer dfunc() @@ -25,10 +27,12 @@ func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprout Status: status.String(), } + reqId := r.Header.Get(logger.HeaderRequestID) + data, err := json.Marshal(&resp) if err != nil { code := http.StatusInternalServerError - log.Error().Err(err).Int("code", code).Msg("fail status") + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Int(EcsHttpResponseCode, code).Msg("fail status") http.Error(w, "", code) return } @@ -42,7 +46,7 @@ func (rt Router) handleStatus(w http.ResponseWriter, _ *http.Request, _ httprout var nWritten int if nWritten, err = w.Write(data); err != nil { if err != context.Canceled { - log.Error().Err(err).Int("code", code).Msg("fail status") + log.Error().Err(err).Str(EcsHttpRequestId, reqId).Int(EcsHttpResponseCode, code).Msg("fail status") } } diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index 26a650661..ac046c26a 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -6,19 +6,17 @@ package fleet import ( "context" - "github.com/pkg/errors" - "net/http" - - "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/fleet-server/v7/internal/pkg/limit" - "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/beats/v7/libbeat/api" "github.com/elastic/beats/v7/libbeat/cmd/instance/metrics" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/monitoring" + "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" - "github.com/rs/zerolog" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/pkg/errors" ) var ( @@ -100,60 +98,18 @@ func init() { 
cntStatus.Register(routesRegistry.NewRegistry("status")) } -// Increment error metric, log and return code -func (rt *routeStats) IncError(err error) (int, string, string, zerolog.Level) { - lvl := zerolog.DebugLevel - - incFail := true - - var code int - var errStr string - var msgStr string - switch err { - case ErrAgentNotFound: - errStr = "AgentNotFound" - msgStr = "agent could not be found" - code = http.StatusNotFound - lvl = zerolog.WarnLevel - case limit.ErrRateLimit: - errStr = "RateLimit" - msgStr = "exceeded the rate limit" - code = http.StatusTooManyRequests +func (rt *routeStats) IncError(err error) { + + switch { + case errors.Is(err, limit.ErrRateLimit): rt.rateLimit.Inc() - incFail = false - case limit.ErrMaxLimit: - errStr = "MaxLimit" - msgStr = "exceeded the max limit" - code = http.StatusTooManyRequests + case errors.Is(err, limit.ErrMaxLimit): rt.maxLimit.Inc() - incFail = false - case context.Canceled: - errStr = "ServiceUnavailable" - msgStr = "server is stopping" - code = http.StatusServiceUnavailable + case errors.Is(err, context.Canceled): rt.drop.Inc() - incFail = false - case ErrInvalidUserAgent: - errStr = "InvalidUserAgent" - msgStr = "user-agent is invalid" - code = http.StatusBadRequest - lvl = zerolog.InfoLevel - case ErrUnsupportedVersion: - errStr = "UnsupportedVersion" - msgStr = "version is not supported" - code = http.StatusBadRequest - lvl = zerolog.InfoLevel default: - errStr = "BadRequest" - lvl = zerolog.InfoLevel - code = http.StatusBadRequest + rt.failure.Inc() } - - if incFail { - cntCheckin.failure.Inc() - } - - return code, errStr, msgStr, lvl } func (rt *routeStats) IncStart() func() { @@ -174,25 +130,13 @@ func (rt *artifactStats) Register(registry *monitoring.Registry) { rt.throttle = monitoring.NewUint(registry, "throttle") } -func (rt *artifactStats) IncError(err error) (code int, str string, msg string, lvl zerolog.Level) { - switch err { - case dl.ErrNotFound: - // Artifact not found indicates a race condition upstream - // or an attack on the fleet server. 
Either way it should - // show up in the logs at a higher level than debug - code = http.StatusNotFound - str = "NotFound" - msg = "not found" +func (rt *artifactStats) IncError(err error) { + switch { + case errors.Is(err, dl.ErrNotFound): rt.notFound.Inc() - lvl = zerolog.WarnLevel - case ErrorThrottle: - code = http.StatusTooManyRequests - str = "TooManyRequests" - msg = "too many requests" + case errors.Is(err, ErrorThrottle): rt.throttle.Inc() default: - code, str, msg, lvl = rt.routeStats.IncError(err) + rt.routeStats.IncError(err) } - - return } diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index 411022113..4f40f703a 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -5,9 +5,13 @@ package fleet import ( + "net/http" + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/policy" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog/log" ) const ( @@ -16,9 +20,6 @@ const ( ROUTE_CHECKIN = "/api/fleet/agents/:id/checkin" ROUTE_ACKS = "/api/fleet/agents/:id/acks" ROUTE_ARTIFACTS = "/api/fleet/artifacts/:id/:sha2" - - // Support previous relative path exposed in Kibana until all feature flags are flipped - ROUTE_ARTIFACTS_DEPRECATED = "/api/endpoint/artifacts/download/:id/:sha2" ) type Router struct { @@ -42,15 +43,53 @@ func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, at *ArtifactT, ack ack: ack, } + routes := []struct { + method string + path string + handler httprouter.Handle + }{ + { + http.MethodGet, + ROUTE_STATUS, + r.handleStatus, + }, + { + http.MethodPost, + ROUTE_ENROLL, + r.handleEnroll, + }, + { + http.MethodPost, + ROUTE_CHECKIN, + r.handleCheckin, + }, + { + http.MethodPost, + ROUTE_ACKS, + r.handleAcks, + }, + { + http.MethodGet, + ROUTE_ARTIFACTS, + r.handleArtifacts, + }, + } + router := httprouter.New() - router.GET(ROUTE_STATUS, r.handleStatus) - router.POST(ROUTE_ENROLL, r.handleEnroll) - router.POST(ROUTE_CHECKIN, r.handleCheckin) - router.POST(ROUTE_ACKS, r.handleAcks) - router.GET(ROUTE_ARTIFACTS, r.handleArtifacts) - - // deprecated: TODO: remove - router.GET(ROUTE_ARTIFACTS_DEPRECATED, r.handleArtifacts) + + // Install routes + for _, rte := range routes { + log.Info(). + Str("method", rte.method). + Str("path", rte.path). 
+ Msg("Server install route") + + router.Handle( + rte.method, + rte.path, + logger.HttpHandler(rte.handler), + ) + } return router } diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index e4378345d..e3dde6ed8 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -117,7 +117,6 @@ type Event struct { } type StatusResponse struct { - Name string `json:"name"` - Version string `json:"version"` - Status string `json:"status"` + Name string `json:"name"` + Status string `json:"status"` } diff --git a/internal/pkg/action/dispatcher.go b/internal/pkg/action/dispatcher.go index d23fd16b6..1ac06dee9 100644 --- a/internal/pkg/action/dispatcher.go +++ b/internal/pkg/action/dispatcher.go @@ -65,7 +65,7 @@ func (d *Dispatcher) Subscribe(agentId string, seqNo sqn.SeqNo) *Sub { sz := len(d.subs) d.mx.Unlock() - log.Debug().Str("agentId", agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") + log.Trace().Str("agentId", agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") return &sub } @@ -80,7 +80,7 @@ func (d *Dispatcher) Unsubscribe(sub *Sub) { sz := len(d.subs) d.mx.Unlock() - log.Debug().Str("agentId", sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") + log.Trace().Str("agentId", sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") } func (d *Dispatcher) process(ctx context.Context, hits []es.HitT) { diff --git a/internal/pkg/apikey/auth.go b/internal/pkg/apikey/auth.go index 0a7675770..1f2da7ca1 100644 --- a/internal/pkg/apikey/auth.go +++ b/internal/pkg/apikey/auth.go @@ -29,8 +29,6 @@ type SecurityInfo struct { // NOTE: Bulk request currently not available. func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*SecurityInfo, error) { - // TODO: Escape request for safety. Don't depend on ES. 
- token := fmt.Sprintf("%s%s", authPrefix, k.Token()) req := esapi.SecurityAuthenticateRequest{ @@ -40,7 +38,7 @@ func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*Se res, err := req.Do(ctx, es) if err != nil { - return nil, err + return nil, fmt.Errorf("apikey auth request %s: %w", k.Id, err) } if res.Body != nil { @@ -48,13 +46,13 @@ func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*Se } if res.IsError() { - return nil, fmt.Errorf("fail Auth: %s", res.String()) + return nil, fmt.Errorf("apikey auth response %s: %s", k.Id, res.String()) } var info SecurityInfo decoder := json.NewDecoder(res.Body) if err := decoder.Decode(&info); err != nil { - return nil, fmt.Errorf("Auth: error parsing response body: %s", err) // TODO: Wrap error + return nil, fmt.Errorf("apikey auth parse %s: %w", k.Id, err) } return &info, nil diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go index 1a4b0abde..3f7d9251a 100644 --- a/internal/pkg/apikey/invalidate.go +++ b/internal/pkg/apikey/invalidate.go @@ -27,7 +27,7 @@ func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string body, err := json.Marshal(&payload) if err != nil { - return err + return fmt.Errorf("InvalidateAPIKey: %w", err) } opts := []func(*esapi.SecurityInvalidateAPIKeyRequest){ @@ -40,7 +40,7 @@ func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string ) if err != nil { - return err + return fmt.Errorf("InvalidateAPIKey: %w", err) } defer res.Body.Close() @@ -48,5 +48,6 @@ func Invalidate(ctx context.Context, client *elasticsearch.Client, ids ...string if res.IsError() { return fmt.Errorf("fail InvalidateAPIKey: %s", res.String()) } + return nil } diff --git a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go new file mode 100644 index 000000000..f50d059d2 --- /dev/null +++ b/internal/pkg/logger/ecs.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package logger + +const ( + + // HTTP + EcsHttpVersion = "http.version" + EcsHttpRequestId = "http.request.id" + EcsHttpRequestMethod = "http.request.method" + EcsHttpRequestBodyBytes = "http.request.body.bytes" + EcsHttpResponseCode = "http.response.status_code" + EcsHttpResponseBodyBytes = "http.response.body.bytes" + + // URL + EcsUrlFull = "url.full" + EcsUrlDomain = "url.domain" + EcsUrlPort = "url.port" + + // Client + EcsClientAddress = "client.address" + EcsClientIp = "client.ip" + EcsClientPort = "client.port" + + // TLS + EcsTlsEstablished = "tls.established" + + // Event + EcsEventDuration = "event.duration" +) diff --git a/internal/pkg/logger/http.go b/internal/pkg/logger/http.go new file mode 100644 index 000000000..64eeae012 --- /dev/null +++ b/internal/pkg/logger/http.go @@ -0,0 +1,165 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+
+package logger
+
+import (
+	"io"
+	"net"
+	"net/http"
+	"strconv"
+	"strings"
+	"sync/atomic"
+	"time"
+
+	"github.com/julienschmidt/httprouter"
+	"github.com/rs/zerolog/log"
+)
+
+const (
+	HeaderRequestID = "X-Request-ID"
+	httpSlashPrefix = "HTTP/"
+)
+
+type ReaderCounter struct {
+	io.ReadCloser
+	count uint64
+}
+
+func NewReaderCounter(r io.ReadCloser) *ReaderCounter {
+	return &ReaderCounter{
+		ReadCloser: r,
+	}
+}
+
+func (rd *ReaderCounter) Read(buf []byte) (int, error) {
+	n, err := rd.ReadCloser.Read(buf)
+	atomic.AddUint64(&rd.count, uint64(n))
+	return n, err
+}
+
+func (rd *ReaderCounter) Count() uint64 {
+	return atomic.LoadUint64(&rd.count)
+}
+
+type ResponseCounter struct {
+	http.ResponseWriter
+	count       uint64
+	statusCode  int
+	wroteHeader bool
+}
+
+func NewResponseCounter(w http.ResponseWriter) *ResponseCounter {
+	return &ResponseCounter{
+		ResponseWriter: w,
+	}
+}
+
+func (rc *ResponseCounter) Write(buf []byte) (int, error) {
+	if !rc.wroteHeader {
+		rc.wroteHeader = true
+		rc.statusCode = 200
+	}
+	n, err := rc.ResponseWriter.Write(buf)
+	atomic.AddUint64(&rc.count, uint64(n))
+	return n, err
+}
+
+func (rc *ResponseCounter) WriteHeader(statusCode int) {
+	rc.ResponseWriter.WriteHeader(statusCode)
+
+	// Defend against unsupported multiple calls to WriteHeader
+	if !rc.wroteHeader {
+		rc.statusCode = statusCode
+		rc.wroteHeader = true
+	}
+}
+
+func (rc *ResponseCounter) Count() uint64 {
+	return atomic.LoadUint64(&rc.count)
+}
+
+func splitAddr(addr string) (host string, port int) {
+
+	host, portS, err := net.SplitHostPort(addr)
+
+	if err == nil {
+		if v, err := strconv.Atoi(portS); err == nil {
+			port = v
+		}
+	}
+
+	return
+}
+
+// Expects HTTP version in form of HTTP/x.y
+func stripHTTP(h string) string {
+	if strings.HasPrefix(h, httpSlashPrefix) {
+		return h[len(httpSlashPrefix):]
+	}
+
+	return h
+}
+
+// ECS HTTP log wrapper
+func HttpHandler(next httprouter.Handle) httprouter.Handle {
+
+	return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) {
+		e := log.Debug()
+
+		if !e.Enabled() {
+			next(w, r, p)
+			return
+		}
+
+		start := time.Now()
+
+		rdCounter := NewReaderCounter(r.Body)
+		r.Body = rdCounter
+
+		wrCounter := NewResponseCounter(w)
+
+		next(wrCounter, r, p)
+
+		// Look for request id
+		if reqID := r.Header.Get(HeaderRequestID); reqID != "" {
+			e.Str(EcsHttpRequestId, reqID)
+		}
+
+		// URL info
+		e.Str(EcsUrlFull, r.URL.String())
+
+		if domain := r.URL.Hostname(); domain != "" {
+			e.Str(EcsUrlDomain, domain)
+		}
+
+		port := r.URL.Port()
+		if port != "" {
+			if v, err := strconv.Atoi(port); err == nil {
+				e.Int(EcsUrlPort, v)
+			}
+		}
+
+		// HTTP info
+		e.Str(EcsHttpVersion, stripHTTP(r.Proto))
+		e.Str(EcsHttpRequestMethod, r.Method)
+		e.Int(EcsHttpResponseCode, wrCounter.statusCode)
+		e.Uint64(EcsHttpRequestBodyBytes, rdCounter.Count())
+		e.Uint64(EcsHttpResponseBodyBytes, wrCounter.Count())
+
+		// Client info
+		remoteIP, remotePort := splitAddr(r.RemoteAddr)
+		e.Str(EcsClientAddress, r.RemoteAddr)
+		e.Str(EcsClientIp, remoteIP)
+		e.Int(EcsClientPort, remotePort)
+
+		// TLS info
+		e.Bool(EcsTlsEstablished, (r.TLS != nil))
+
+		// Event info
+		e.Int64(EcsEventDuration, time.Since(start).Nanoseconds())
+
+		e.Msg("HTTP handler")
+	}
+}

From 4b2b37af32c2fa39313bfdea183f707aeaa5eb8d Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Fri, 28 May 2021 01:15:10 -0400
Subject: [PATCH 103/240] [Automation] Update elastic stack version to 7.14.0-d593413f for testing (#406)

Co-authored-by: apmmachine
---
 dev-tools/integration/.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env
index 0e50963d5..788163c68 100644
--- a/dev-tools/integration/.env
+++ b/dev-tools/integration/.env
@@ -1,4 +1,4 @@
-ELASTICSEARCH_VERSION=7.14.0-f385fee6-SNAPSHOT
+ELASTICSEARCH_VERSION=7.14.0-d593413f-SNAPSHOT
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=changeme
 TEST_ELASTICSEARCH_HOSTS=localhost:9200
\ No newline at end of file

From 9a507c94ba498a712e0d17413fe71128ca95aae3 Mon Sep 17 00:00:00 2001
From: apmmachine <58790750+apmmachine@users.noreply.github.com>
Date: Mon, 31 May 2021 01:15:12 -0400
Subject: [PATCH 104/240] [Automation] Update elastic stack version to 7.14.0-62d7f45e for testing (#409)

Co-authored-by: apmmachine
---
 dev-tools/integration/.env | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env
index 788163c68..a00a0de06 100644
--- a/dev-tools/integration/.env
+++ b/dev-tools/integration/.env
@@ -1,4 +1,4 @@
-ELASTICSEARCH_VERSION=7.14.0-d593413f-SNAPSHOT
+ELASTICSEARCH_VERSION=7.14.0-62d7f45e-SNAPSHOT
 ELASTICSEARCH_USERNAME=elastic
 ELASTICSEARCH_PASSWORD=changeme
 TEST_ELASTICSEARCH_HOSTS=localhost:9200
\ No newline at end of file

From c5340dfa194ff9452a1ec61d64a6b36e6336a11e Mon Sep 17 00:00:00 2001
From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com>
Date: Mon, 31 May 2021 13:22:21 +0000
Subject: [PATCH 105/240] Add log message to diag auth latency. Replace with metrics on auth round trip time in the future. (#411)

(cherry picked from commit 2568bfd70db00b495e5a3c00b2741e425f1a50fa)

Co-authored-by: Sean Cunningham
---
 cmd/fleet/auth.go | 24 ++++++++++++++++++++++++
 1 file changed, 24 insertions(+)

diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go
index cf147fec3..b79e0cc51 100644
--- a/cmd/fleet/auth.go
+++ b/cmd/fleet/auth.go
@@ -84,17 +84,41 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (*
 }
 
 func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) {
+	start := time.Now()
+
 	// authenticate
 	key, err := authApiKey(r, bulker.Client(), c)
 	if err != nil {
 		return nil, err
 	}
 
+	authTime := time.Now()
+
 	agent, err := findAgentByApiKeyId(r.Context(), bulker, key.Id)
 	if err != nil {
 		return nil, err
 	}
 
+	findTime := time.Now()
+
+	// TODO: Remove temporary log msg to diag roundtrip speed issue on auth
+	if findTime.Sub(start) > time.Second*5 {
+		reqId := r.Header.Get(logger.HeaderRequestID)
+
+		zlog := log.With().
+			Str("agentId", id).
+			Str(EcsHttpRequestId, reqId).
+			Logger()
+
+		zlog.Debug().
+			Int64(EcsEventDuration, authTime.Sub(start).Nanoseconds()).
+			Msg("authApiKey slow")
+
+		zlog.Debug().
+			Int64(EcsEventDuration, findTime.Sub(authTime).Nanoseconds()).
+			Msg("findAgentByApiKeyId slow")
+	}
+
 	// validate key alignment
 	if agent.AccessApiKeyId != key.Id {
 		log.Info().
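The guard added above is a small, reusable pattern: time each step of a two-step round trip and only emit the per-step durations when the total crosses a threshold. Below is a minimal, self-contained sketch of that pattern, assuming only the zerolog global logger (imported here from its upstream github.com path); the 5-second threshold and the per-step debug logging mirror the patch, while the function names, closures, and log messages are illustrative rather than the server's actual helpers.

package main

import (
	"time"

	"github.com/rs/zerolog/log"
)

// slowThreshold mirrors the 5s guard used in the patch above.
const slowThreshold = 5 * time.Second

// timeTwoSteps runs two sequential steps and, only when the combined round
// trip exceeds slowThreshold, logs each step's duration at debug level.
func timeTwoSteps(step1, step2 func() error) error {
	start := time.Now()

	if err := step1(); err != nil {
		return err
	}
	mid := time.Now()

	if err := step2(); err != nil {
		return err
	}
	end := time.Now()

	if end.Sub(start) > slowThreshold {
		log.Debug().
			Int64("event.duration", mid.Sub(start).Nanoseconds()).
			Msg("step one slow")
		log.Debug().
			Int64("event.duration", end.Sub(mid).Nanoseconds()).
			Msg("step two slow")
	}
	return nil
}

func main() {
	// Simulate slow steps so the guard trips and both debug lines appear.
	slow := func() error { time.Sleep(3 * time.Second); return nil }
	if err := timeTwoSteps(slow, slow); err != nil {
		log.Error().Err(err).Msg("round trip failed")
	}
}

Run as-is, the total sleep exceeds the threshold, so both debug lines are emitted; in the server the two steps correspond to the API key authentication and the agent lookup.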
From 913b0e03739847d97c3670d9de2fb412824106ee Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 1 Jun 2021 01:14:58 -0400 Subject: [PATCH 106/240] [Automation] Update elastic stack version to 7.14.0-631ae8b8 for testing (#414) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index a00a0de06..476b2eee9 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-62d7f45e-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-631ae8b8-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 60b73c176e135499e075b5d17cccb4efa9753649 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 2 Jun 2021 01:14:52 -0400 Subject: [PATCH 107/240] [Automation] Update elastic stack version to 7.14.0-bdd01eb6 for testing (#417) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 476b2eee9..adfc3f636 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-631ae8b8-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-bdd01eb6-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From cd679a74d8aad80c19c2270a8a42f7505c948cae Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 3 Jun 2021 01:15:23 -0400 Subject: [PATCH 108/240] [Automation] Update elastic stack version to 7.14.0-8590217b for testing (#423) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index adfc3f636..4555b21c3 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-bdd01eb6-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-8590217b-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 1a729f5c8e8ce18825062ed0fb4801377f2cbde0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 3 Jun 2021 12:47:14 +0000 Subject: [PATCH 109/240] Revert elastic search client to the 7.x series. 
(#425) (cherry picked from commit c3e73eee2876e5e7418c76bfa0c364035c51fdcd) Co-authored-by: Sean Cunningham --- NOTICE.txt | 6 +++--- cmd/fleet/auth.go | 2 +- cmd/fleet/handleEnroll.go | 2 +- go.mod | 2 +- go.sum | 4 ++-- internal/pkg/apikey/auth.go | 4 ++-- internal/pkg/apikey/create.go | 4 ++-- internal/pkg/apikey/get.go | 4 ++-- internal/pkg/apikey/invalidate.go | 4 ++-- internal/pkg/bulk/engine.go | 4 ++-- internal/pkg/bulk/helpers.go | 2 +- internal/pkg/bulk/opBulk.go | 2 +- internal/pkg/bulk/opRead.go | 2 +- internal/pkg/bulk/opSearch.go | 2 +- internal/pkg/config/output.go | 2 +- internal/pkg/config/output_test.go | 2 +- internal/pkg/es/client.go | 2 +- internal/pkg/es/delete.go | 2 +- internal/pkg/es/fleet_global_checkpoints.go | 2 +- internal/pkg/es/info.go | 2 +- internal/pkg/monitor/global_checkpoint.go | 4 ++-- internal/pkg/monitor/monitor.go | 2 +- internal/pkg/monitor/subscription_monitor.go | 2 +- internal/pkg/testing/bulk.go | 2 +- internal/pkg/testing/esutil/bootstrap.go | 2 +- internal/pkg/testing/esutil/datastream.go | 2 +- internal/pkg/testing/esutil/esutil.go | 2 +- internal/pkg/testing/esutil/ilm.go | 2 +- internal/pkg/testing/esutil/index.go | 2 +- internal/pkg/testing/esutil/template.go | 2 +- internal/pkg/ver/check.go | 2 +- 31 files changed, 40 insertions(+), 40 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 6f4478d1e..c32a251a0 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -513,12 +513,12 @@ SOFTWARE -------------------------------------------------------------------------------- -Dependency : github.com/elastic/go-elasticsearch/v8 -Version: v8.0.0-20210414074309-f7ffd04b8d6a +Dependency : github.com/elastic/go-elasticsearch/v7 +Version: v7.13.1 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v8@v8.0.0-20210414074309-f7ffd04b8d6a/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v7@v7.13.1/LICENSE: Apache License Version 2.0, January 2004 diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index b79e0cc51..d8acbd866 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -15,7 +15,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 354e45ec9..9dc522f28 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -22,7 +22,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/gofrs/uuid" "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" diff --git a/go.mod b/go.mod index 07ebe486f..46da4ce94 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,7 @@ require ( github.com/dgraph-io/ristretto v0.0.3 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a - github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a + github.com/elastic/go-elasticsearch/v7 v7.13.1 github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible github.com/google/go-cmp v0.4.0 diff 
--git a/go.sum b/go.sum index 3dd47c8d8..bea793eda 100644 --- a/go.sum +++ b/go.sum @@ -254,8 +254,8 @@ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a h1:9sZywotr64cDBOcWWCFpjOjf4oFuFhKnopckNQ4EqcU= -github.com/elastic/go-elasticsearch/v8 v8.0.0-20210414074309-f7ffd04b8d6a/go.mod h1:xe9a/L2aeOgFKKgrO3ibQTnMdpAeL0GC+5/HpGScSa4= +github.com/elastic/go-elasticsearch/v7 v7.13.1 h1:PaM3V69wPlnwR+ne50rSKKn0RNDYnnOFQcuGEI0ce80= +github.com/elastic/go-elasticsearch/v7 v7.13.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-libaudit/v2 v2.1.0 h1:yWSKoGaoWLGFPjqWrQ4gwtuM77pTk7K4CsPxXss8he4= github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= diff --git a/internal/pkg/apikey/auth.go b/internal/pkg/apikey/auth.go index 1f2da7ca1..f80c32600 100644 --- a/internal/pkg/apikey/auth.go +++ b/internal/pkg/apikey/auth.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) type SecurityInfo struct { diff --git a/internal/pkg/apikey/create.go b/internal/pkg/apikey/create.go index 52f9c512f..dceef524c 100644 --- a/internal/pkg/apikey/create.go +++ b/internal/pkg/apikey/create.go @@ -10,8 +10,8 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) { diff --git a/internal/pkg/apikey/get.go b/internal/pkg/apikey/get.go index 7230dcd95..2414ecb40 100644 --- a/internal/pkg/apikey/get.go +++ b/internal/pkg/apikey/get.go @@ -9,8 +9,8 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) type ApiKeyMetadata struct { diff --git a/internal/pkg/apikey/invalidate.go b/internal/pkg/apikey/invalidate.go index 3f7d9251a..8d284df03 100644 --- a/internal/pkg/apikey/invalidate.go +++ b/internal/pkg/apikey/invalidate.go @@ -10,8 +10,8 @@ import ( "encoding/json" "fmt" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) // Invalidate invalidates the provided API keys by ID. 
diff --git a/internal/pkg/bulk/engine.go b/internal/pkg/bulk/engine.go index 2c660bb36..1a766f6a8 100644 --- a/internal/pkg/bulk/engine.go +++ b/internal/pkg/bulk/engine.go @@ -15,8 +15,8 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/rs/zerolog/log" "golang.org/x/sync/semaphore" ) diff --git a/internal/pkg/bulk/helpers.go b/internal/pkg/bulk/helpers.go index 893275c50..f2659591a 100644 --- a/internal/pkg/bulk/helpers.go +++ b/internal/pkg/bulk/helpers.go @@ -8,7 +8,7 @@ import ( "encoding/json" "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/bulk/opBulk.go b/internal/pkg/bulk/opBulk.go index cbd9dfaf9..53dabeae7 100644 --- a/internal/pkg/bulk/opBulk.go +++ b/internal/pkg/bulk/opBulk.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/mailru/easyjson" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/bulk/opRead.go b/internal/pkg/bulk/opRead.go index 3a8d04fc7..8dd599494 100644 --- a/internal/pkg/bulk/opRead.go +++ b/internal/pkg/bulk/opRead.go @@ -10,7 +10,7 @@ import ( "fmt" "time" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/mailru/easyjson" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/bulk/opSearch.go b/internal/pkg/bulk/opSearch.go index c8d4f1b17..e0d7b341f 100644 --- a/internal/pkg/bulk/opSearch.go +++ b/internal/pkg/bulk/opSearch.go @@ -12,7 +12,7 @@ import ( "time" "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/mailru/easyjson" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index 7e6e5205d..78e72ecf6 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -14,7 +14,7 @@ import ( "strings" "time" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/elastic/beats/v7/libbeat/common" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index 9f604df35..f4c983743 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -13,7 +13,7 @@ import ( "time" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" "github.com/stretchr/testify/assert" diff --git a/internal/pkg/es/client.go b/internal/pkg/es/client.go index 792700ef9..b3c6423b6 100644 --- a/internal/pkg/es/client.go +++ b/internal/pkg/es/client.go @@ -11,7 +11,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/es/delete.go 
b/internal/pkg/es/delete.go index a27cffaa4..708c5df0a 100644 --- a/internal/pkg/es/delete.go +++ b/internal/pkg/es/delete.go @@ -8,7 +8,7 @@ import ( "context" "encoding/json" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" ) func DeleteIndices(ctx context.Context, es *elasticsearch.Client, indices []string) error { diff --git a/internal/pkg/es/fleet_global_checkpoints.go b/internal/pkg/es/fleet_global_checkpoints.go index 9c88979a6..40133d606 100644 --- a/internal/pkg/es/fleet_global_checkpoints.go +++ b/internal/pkg/es/fleet_global_checkpoints.go @@ -12,7 +12,7 @@ import ( "time" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" ) // The wrapper for the new _fleet global_checkpoints that is not the part of the diff --git a/internal/pkg/es/info.go b/internal/pkg/es/info.go index d5bf00239..7dad0c7ff 100644 --- a/internal/pkg/es/info.go +++ b/internal/pkg/es/info.go @@ -9,7 +9,7 @@ import ( "encoding/json" "strings" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" ) type versionInfo struct { diff --git a/internal/pkg/monitor/global_checkpoint.go b/internal/pkg/monitor/global_checkpoint.go index ef93be0a7..5d5e7c8fe 100644 --- a/internal/pkg/monitor/global_checkpoint.go +++ b/internal/pkg/monitor/global_checkpoint.go @@ -14,8 +14,8 @@ import ( esh "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7" + "github.com/elastic/go-elasticsearch/v7/esapi" ) var ErrGlobalCheckpoint = errors.New("global checkpoint error") diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 1485ef441..0b1e7b48b 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -18,7 +18,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/sleep" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/monitor/subscription_monitor.go b/internal/pkg/monitor/subscription_monitor.go index f0880cebe..ec5c71821 100644 --- a/internal/pkg/monitor/subscription_monitor.go +++ b/internal/pkg/monitor/subscription_monitor.go @@ -13,7 +13,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" "golang.org/x/sync/errgroup" ) diff --git a/internal/pkg/testing/bulk.go b/internal/pkg/testing/bulk.go index afb5ab50c..5da6febdd 100644 --- a/internal/pkg/testing/bulk.go +++ b/internal/pkg/testing/bulk.go @@ -7,7 +7,7 @@ package testing import ( "context" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/gofrs/uuid" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" diff --git a/internal/pkg/testing/esutil/bootstrap.go b/internal/pkg/testing/esutil/bootstrap.go index 6b7c01f34..e2aafce76 100644 --- a/internal/pkg/testing/esutil/bootstrap.go +++ b/internal/pkg/testing/esutil/bootstrap.go @@ -7,7 +7,7 @@ package esutil import ( "context" - 
"github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" ) // EnsureIndex sets up the index if it doesn't exists, utilized for integration tests at the moment diff --git a/internal/pkg/testing/esutil/datastream.go b/internal/pkg/testing/esutil/datastream.go index d31a37b78..990288921 100644 --- a/internal/pkg/testing/esutil/datastream.go +++ b/internal/pkg/testing/esutil/datastream.go @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/testing/esutil/esutil.go b/internal/pkg/testing/esutil/esutil.go index 1bace262b..97ba8c715 100644 --- a/internal/pkg/testing/esutil/esutil.go +++ b/internal/pkg/testing/esutil/esutil.go @@ -12,7 +12,7 @@ import ( "net/http" "strings" - "github.com/elastic/go-elasticsearch/v8/esapi" + "github.com/elastic/go-elasticsearch/v7/esapi" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/testing/esutil/ilm.go b/internal/pkg/testing/esutil/ilm.go index 16dc28271..00f92e08c 100644 --- a/internal/pkg/testing/esutil/ilm.go +++ b/internal/pkg/testing/esutil/ilm.go @@ -12,7 +12,7 @@ import ( "strconv" "strings" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/testing/esutil/index.go b/internal/pkg/testing/esutil/index.go index 6b61534af..2e1d9304d 100644 --- a/internal/pkg/testing/esutil/index.go +++ b/internal/pkg/testing/esutil/index.go @@ -10,7 +10,7 @@ import ( "errors" "fmt" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/testing/esutil/template.go b/internal/pkg/testing/esutil/template.go index 0873c4885..150b50c0e 100644 --- a/internal/pkg/testing/esutil/template.go +++ b/internal/pkg/testing/esutil/template.go @@ -12,7 +12,7 @@ import ( "net/http" "strings" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go index a75962055..84a87d7f7 100644 --- a/internal/pkg/ver/check.go +++ b/internal/pkg/ver/check.go @@ -13,7 +13,7 @@ import ( esh "github.com/elastic/fleet-server/v7/internal/pkg/es" - "github.com/elastic/go-elasticsearch/v8" + "github.com/elastic/go-elasticsearch/v7" "github.com/hashicorp/go-version" "github.com/rs/zerolog/log" ) From 22bf71fd9cabc3c8b95efe132d88025073c09fc0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 3 Jun 2021 16:25:18 +0000 Subject: [PATCH 110/240] Propagate checkin status to agent record (#427) (cherry picked from commit 73fcdb4cbc14f3cff5a612027f81d03401965e36) Co-authored-by: Sean Cunningham --- cmd/fleet/handleCheckin.go | 13 +++++++++-- cmd/fleet/schema.go | 1 + internal/pkg/checkin/bulk.go | 29 +++++++++++++---------- internal/pkg/checkin/bulk_test.go | 38 ++++++++++++++++++++++++++----- internal/pkg/dl/constants.go | 1 + 5 files changed, 62 insertions(+), 20 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index f3a7cca3c..b490f500f 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -176,6 +176,15 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } + log.Debug(). + Str("agentId", id). + Str("reqId", reqId). 
+ Str("status", req.Status). + Str("seqNo", seqno.String()). + RawJSON("meta", rawMeta). + Uint64("bodyCount", readCounter.Count()). + Msg("checkin start long poll") + // Subscribe to actions dispatcher aSub := ct.ad.Subscribe(agent.Id, seqno) defer ct.ad.Unsubscribe(aSub) @@ -197,7 +206,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed - ct.bc.CheckIn(agent.Id, rawMeta, seqno) + ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno) // Initial fetch for pending actions var ( @@ -234,7 +243,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st log.Trace().Str(EcsHttpRequestId, reqId).Str("agentId", agent.Id).Msg("fire long poll") break LOOP case <-tick.C: - ct.bc.CheckIn(agent.Id, nil, nil) + ct.bc.CheckIn(agent.Id, req.Status, nil, nil) } } } diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index e3dde6ed8..8c28436e7 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -71,6 +71,7 @@ type EnrollResponse struct { } type CheckinRequest struct { + Status string `json:"status"` AckToken string `json:"ack_token,omitempty"` Events []Event `json:"events"` LocalMeta json.RawMessage `json:"local_metadata"` diff --git a/internal/pkg/checkin/bulk.go b/internal/pkg/checkin/bulk.go index 8f20d0294..ec9bdc937 100644 --- a/internal/pkg/checkin/bulk.go +++ b/internal/pkg/checkin/bulk.go @@ -40,8 +40,9 @@ type extraT struct { // There will be 10's of thousands of items // in the map at any point. type pendingT struct { - ts string - extra *extraT + ts string + status string + extra *extraT } type Bulk struct { @@ -93,7 +94,7 @@ func (bc *Bulk) timestamp() string { // WARNING: Bulk will take ownership of fields, // so do not use after passing in. -func (bc *Bulk) CheckIn(id string, meta []byte, seqno sqn.SeqNo) error { +func (bc *Bulk) CheckIn(id string, status string, meta []byte, seqno sqn.SeqNo) error { // Separate out the extra data to minimize // the memory footprint of the 90% case of just @@ -109,8 +110,9 @@ func (bc *Bulk) CheckIn(id string, meta []byte, seqno sqn.SeqNo) error { bc.mut.Lock() bc.pending[id] = pendingT{ - ts: bc.timestamp(), - extra: extra, + ts: bc.timestamp(), + status: status, + extra: extra, } bc.mut.Unlock() @@ -155,7 +157,7 @@ func (bc *Bulk) flush(ctx context.Context) error { updates := make([]bulk.MultiOp, 0, len(pending)) - simpleCache := make(map[string][]byte) + simpleCache := make(map[pendingT][]byte) nowTimestamp := start.UTC().Format(time.RFC3339) @@ -168,23 +170,26 @@ func (bc *Bulk) flush(ctx context.Context) error { // JSON body containing just the timestamp updates. 
var body []byte if pendingData.extra == nil { + var ok bool - body, ok = simpleCache[pendingData.ts] + body, ok = simpleCache[pendingData] if !ok { fields := bulk.UpdateFields{ - dl.FieldLastCheckin: pendingData.ts, - dl.FieldUpdatedAt: nowTimestamp, + dl.FieldLastCheckin: pendingData.ts, + dl.FieldUpdatedAt: nowTimestamp, + dl.FieldLastCheckinStatus: pendingData.status, } if body, err = fields.Marshal(); err != nil { return err } - simpleCache[pendingData.ts] = body + simpleCache[pendingData] = body } } else { fields := bulk.UpdateFields{ - dl.FieldLastCheckin: pendingData.ts, // Set the checkin timestamp - dl.FieldUpdatedAt: nowTimestamp, // Set "updated_at" to the current timestamp + dl.FieldLastCheckin: pendingData.ts, // Set the checkin timestamp + dl.FieldUpdatedAt: nowTimestamp, // Set "updated_at" to the current timestamp + dl.FieldLastCheckinStatus: pendingData.status, // Set the pending status } // Update local metadata if provided diff --git a/internal/pkg/checkin/bulk_test.go b/internal/pkg/checkin/bulk_test.go index fc881e16a..897450242 100644 --- a/internal/pkg/checkin/bulk_test.go +++ b/internal/pkg/checkin/bulk_test.go @@ -44,53 +44,74 @@ func TestBulkSimple(t *testing.T) { bc := NewBulk(&mockBulk) cases := []struct { - desc string - id string - meta []byte - seqno sqn.SeqNo + desc string + id string + status string + meta []byte + seqno sqn.SeqNo }{ { "Simple case", "simpleId", + "online", nil, nil, }, { "Singled field case", "singleFieldId", + "online", []byte(`{"hey":"now"}`), nil, }, { "Multi field case", "multiFieldId", + "online", []byte(`{"hey":"now","brown":"cow"}`), nil, }, { "Multi field nested case", "multiFieldNestedId", + "online", []byte(`{"hey":"now","wee":{"little":"doggie"}}`), nil, }, { "Simple case with seqNo", "simpleseqno", + "online", nil, sqn.SeqNo{1, 2, 3, 4}, }, { "Field case with seqNo", "simpleseqno", + "online", []byte(`{"uncle":"fester"}`), sqn.SeqNo{5, 6, 7, 8}, }, + { + "Unusual status", + "singleFieldId", + "unusual", + nil, + nil, + }, + { + "Empty status", + "singleFieldId", + "", + nil, + nil, + }, } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { - if err := bc.CheckIn(c.id, c.meta, c.seqno); err != nil { + if err := bc.CheckIn(c.id, c.status, c.meta, c.seqno); err != nil { t.Fatal(err) } @@ -117,6 +138,7 @@ func TestBulkSimple(t *testing.T) { type updateT struct { LastCheckin string `json:"last_checkin"` + Status string `json:"last_checkin_status"` UpdatedAt string `json:"updated_at"` Meta json.RawMessage `json:"local_metadata"` SeqNo sqn.SeqNo `json:"action_seq_no"` @@ -145,6 +167,10 @@ func TestBulkSimple(t *testing.T) { t.Error("meta doesn't match up") } + if c.status != sub.Status { + t.Error("status mismatch") + } + }) } } @@ -179,7 +205,7 @@ func benchmarkBulk(n int, flush bool, b *testing.B) { for i := 0; i < b.N; i++ { for _, id := range ids { - err := bc.CheckIn(id, nil, nil) + err := bc.CheckIn(id, "", nil, nil) if err != nil { b.Fatal(err) } diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 7d0d6df99..80811717f 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -32,6 +32,7 @@ const ( FieldRevisionIdx = "revision_idx" FieldCoordinatorIdx = "coordinator_idx" FieldLastCheckin = "last_checkin" + FieldLastCheckinStatus = "last_checkin_status" FieldLocalMetadata = "local_metadata" FieldPolicyRevisionIdx = "policy_revision_idx" FieldPolicyCoordinatorIdx = "policy_coordinator_idx" From 5a1ead02a666090481fff00392644cff6eafd8bd Mon Sep 17 00:00:00 2001 From: 
apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 4 Jun 2021 01:15:23 -0400 Subject: [PATCH 111/240] [Automation] Update elastic stack version to 7.14.0-983e13ea for testing (#428) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 4555b21c3..b84dd27f8 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-8590217b-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-983e13ea-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 0efdb1d2379b406f15bda38b8d67a9343bd10185 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 7 Jun 2021 01:14:53 -0400 Subject: [PATCH 112/240] [Automation] Update elastic stack version to 7.14.0-bb1ffc77 for testing (#430) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index b84dd27f8..9725e6dab 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-983e13ea-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-bb1ffc77-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 576f9f7e7ac71b3e3d73026d7767feae2a1140b0 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 7 Jun 2021 18:04:49 +0000 Subject: [PATCH 113/240] Remove zap stub logger bleed through. (#433) (cherry picked from commit c862cabaf6e35f4c60ece5fdfbb3f7e0c6830616) Co-authored-by: Sean Cunningham --- internal/pkg/logger/ecs.go | 9 +++++++++ internal/pkg/logger/logger.go | 8 ++++---- internal/pkg/logger/zapStub.go | 34 ++++++++++++++++++++++++++-------- 3 files changed, 39 insertions(+), 12 deletions(-) diff --git a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go index f50d059d2..bd8bdf32f 100644 --- a/internal/pkg/logger/ecs.go +++ b/internal/pkg/logger/ecs.go @@ -6,6 +6,15 @@ package logger const ( + // Basic logging + EcsLogLevel = "log.level" + EcsLogName = "log.logger" + EcsLogCaller = "log.origin" + EcsLogStackTrace = "log.origin.stack_trace" + EcsMessage = "message" + EcsTimestamp = "@timestamp" + EcsErrorMessage = "error.message" + // HTTP EcsHttpVersion = "http.version" EcsHttpRequestId = "http.request.id" diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index 420b08a6a..e39311b26 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -79,11 +79,11 @@ func Init(cfg *config.Config) (*Logger, error) { } // override the field names for ECS - zerolog.LevelFieldName = "log.level" - zerolog.ErrorFieldName = "error.message" - zerolog.MessageFieldName = "message" + zerolog.LevelFieldName = EcsLogLevel + zerolog.ErrorFieldName = EcsErrorMessage + zerolog.MessageFieldName = EcsMessage zerolog.TimeFieldFormat = "2006-01-02T15:04:05.999Z" // RFC3339 at millisecond resolution in zulu timezone - zerolog.TimestampFieldName = "@timestamp" + zerolog.TimestampFieldName = EcsTimestamp if !cfg.Logging.Pretty || !cfg.Logging.ToStderr { zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } diff --git a/internal/pkg/logger/zapStub.go b/internal/pkg/logger/zapStub.go index 
b64d02785..4a3d3c1bd 100644 --- a/internal/pkg/logger/zapStub.go +++ b/internal/pkg/logger/zapStub.go @@ -5,6 +5,8 @@ package logger import ( + "encoding/json" + "github.com/elastic/beats/v7/libbeat/logp" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -14,16 +16,16 @@ import ( func encoderConfig() zapcore.EncoderConfig { return zapcore.EncoderConfig{ - MessageKey: "msg", - LevelKey: "level", - NameKey: "name", - TimeKey: "ts", - CallerKey: "caller", - StacktraceKey: "stacktrace", + MessageKey: EcsMessage, + LevelKey: EcsLogLevel, + NameKey: EcsLogName, + TimeKey: EcsTimestamp, + CallerKey: EcsLogCaller, + StacktraceKey: EcsLogStackTrace, LineEnding: "\n", EncodeTime: zapcore.EpochTimeEncoder, EncodeLevel: zapcore.LowercaseLevelEncoder, - EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeDuration: zapcore.NanosDurationEncoder, EncodeCaller: zapcore.ShortCallerEncoder, } } @@ -58,7 +60,23 @@ func (z zapStub) Sync() error { } func (z zapStub) Write(p []byte) (n int, err error) { - log.Log().RawJSON("zap", p).Msg("") + + // Unwrap the zap object for logging + m := make(map[string]interface{}) + if err := json.Unmarshal(p, &m); err != nil { + return 0, err + } + + ctx := log.Log() + for key, val := range m { + + // Don't dupe the timestamp, use the fleet formatted timestamp. + if key != EcsTimestamp { + ctx.Interface(key, val) + } + } + + ctx.Send() return 0, nil } From ede7640271bf728657c822d3b0f55f144240fe21 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 8 Jun 2021 10:07:49 +0000 Subject: [PATCH 114/240] Better enroll logging (#439) (cherry picked from commit 94bd972bbc93f0cf6c55eedf502cc5376d23795c) Co-authored-by: Sean Cunningham --- cmd/fleet/handleEnroll.go | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 9dc522f28..0aed0cde5 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -78,7 +78,12 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou return } - data, err := rt.et.handleEnroll(r) + enrollResponse, err := rt.et.handleEnroll(r) + + var data []byte + if err == nil { + data, err = json.Marshal(enrollResponse) + } reqId := r.Header.Get(logger.HeaderRequestID) @@ -107,17 +112,19 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou cntEnroll.bodyOut.Add(uint64(numWritten)) - log.Trace(). + log.Info(). Err(err). - Str(EcsHttpRequestId, reqId). - RawJSON("raw", data). Str("mod", kEnrollMod). + Str("agentId", enrollResponse.Item.ID). + Str("policyId", enrollResponse.Item.PolicyId). + Str("apiKeyId", enrollResponse.Item.AccessApiKeyId). + Str(EcsHttpRequestId, reqId). + Int(EcsHttpResponseBodyBytes, numWritten). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
- Msg("handleEnroll OK") + Msg("success enroll") } -func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { - +func (et *EnrollerT) handleEnroll(r *http.Request) (*EnrollResponse, error) { limitF, err := et.limit.Acquire() if err != nil { return nil, err @@ -154,12 +161,7 @@ func (et *EnrollerT) handleEnroll(r *http.Request) ([]byte, error) { cntEnroll.bodyIn.Add(readCounter.Count()) - resp, err := _enroll(r.Context(), et.bulker, et.cache, *req, *erec) - if err != nil { - return nil, err - } - - return json.Marshal(resp) + return _enroll(r.Context(), et.bulker, et.cache, *req, *erec) } func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollRequest, erec model.EnrollmentApiKey) (*EnrollResponse, error) { From 3d504d7ee8d8ba7a1c0bdcee8233ff8d85623c01 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 8 Jun 2021 10:14:42 +0000 Subject: [PATCH 115/240] [7.x](backport #437) Refactor bulk engine startup and add bulk ApiKey (#440) * Wrap ApiKey calls limiting wrapper on the bulk interface. (cherry picked from commit a629d5c49d46925520b8126650f9925a03c0bf89) * Refactor bulk init (cherry picked from commit 5ba87ef4f29354ca885e8b6acfeae29791605209) Co-authored-by: Sean Cunningham Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- cmd/fleet/auth.go | 31 ++-- cmd/fleet/handleEnroll.go | 2 +- cmd/fleet/main.go | 145 +++++++++++++++--- cmd/fleet/server.go | 2 +- .../pkg/apikey/apikey_integration_test.go | 19 ++- internal/pkg/apikey/auth.go | 4 +- internal/pkg/apikey/get.go | 11 +- internal/pkg/bulk/bulk_test.go | 5 +- internal/pkg/bulk/engine.go | 86 ++++------- internal/pkg/bulk/opApiKey.go | 51 ++++++ internal/pkg/bulk/opt.go | 49 ++++++ internal/pkg/bulk/setup_test.go | 9 +- internal/pkg/monitor/monitor.go | 5 + internal/pkg/testing/bulk.go | 16 ++ internal/pkg/testing/setup.go | 18 ++- 15 files changed, 340 insertions(+), 113 deletions(-) create mode 100644 internal/pkg/bulk/opApiKey.go diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index d8acbd866..95ad1321b 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -15,7 +15,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" - "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) @@ -31,7 +30,7 @@ var ( // This authenticates that the provided API key exists and is enabled. // WARNING: This does not validate that the api key is valid for the Fleet Domain. // An additional check must be executed to validate it is not a random api key. -func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (*apikey.ApiKey, error) { +func authApiKey(r *http.Request, bulker bulk.Bulk, c cache.Cache) (*apikey.ApiKey, error) { key, err := apikey.ExtractAPIKey(r) if err != nil { @@ -46,7 +45,7 @@ func authApiKey(r *http.Request, client *elasticsearch.Client, c cache.Cache) (* start := time.Now() - info, err := key.Authenticate(r.Context(), client) + info, err := bulker.ApiKeyAuth(r.Context(), *key) if err != nil { log.Info(). @@ -87,13 +86,21 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo start := time.Now() // authenticate - key, err := authApiKey(r, bulker.Client(), c) + key, err := authApiKey(r, bulker, c) if err != nil { return nil, err } authTime := time.Now() + if authTime.Sub(start) > time.Second { + log.Debug(). + Str("agentId", id). 
+ Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). + Int64(EcsEventDuration, authTime.Sub(start).Nanoseconds()). + Msg("authApiKey slow") + } + agent, err := findAgentByApiKeyId(r.Context(), bulker, key.Id) if err != nil { return nil, err @@ -101,20 +108,10 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo findTime := time.Now() - // TOOD: Remove temporary log msg to diag roundtrip speed issue on auth - if findTime.Sub(start) > time.Second*5 { - reqId := r.Header.Get(logger.HeaderRequestID) - - zlog := log.With(). + if findTime.Sub(authTime) > time.Second { + log.Debug(). Str("agentId", id). - Str(EcsHttpRequestId, reqId). - Logger() - - zlog.Debug(). - Int64(EcsEventDuration, authTime.Sub(start).Nanoseconds()). - Msg("authApiKey slow") - - zlog.Debug(). + Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). Int64(EcsEventDuration, findTime.Sub(authTime).Nanoseconds()). Msg("findAgentByApiKeyId slow") } diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 0aed0cde5..2fe739cd0 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -131,7 +131,7 @@ func (et *EnrollerT) handleEnroll(r *http.Request) (*EnrollResponse, error) { } defer limitF() - key, err := authApiKey(r, et.bulker.Client(), et.cache) + key, err := authApiKey(r, et.bulker, et.cache) if err != nil { return nil, err } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 3c619da54..7c1e9fac0 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -37,6 +37,7 @@ import ( "github.com/elastic/elastic-agent-client/v7/pkg/client" "github.com/elastic/elastic-agent-client/v7/pkg/proto" "github.com/hashicorp/go-version" + "github.com/pkg/errors" "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/spf13/cobra" @@ -456,6 +457,8 @@ func (f *FleetServer) Run(ctx context.Context) error { ) started := false + +LOOP: for { ech := make(chan error, 2) @@ -466,8 +469,8 @@ func (f *FleetServer) Run(ctx context.Context) error { f.reporter.Status(proto.StateObserved_STARTING, "Starting", nil) } - // Restart profiler - if curCfg == nil || curCfg.Inputs[0].Server.Profiler.Enabled != newCfg.Inputs[0].Server.Profiler.Enabled || curCfg.Inputs[0].Server.Profiler.Bind != newCfg.Inputs[0].Server.Profiler.Bind { + // Start or restart profiler + if configChangedProfiler(curCfg, newCfg) { stop(proCancel, proEg) proEg, proCancel = nil, nil if newCfg.Inputs[0].Server.Profiler.Enabled { @@ -477,8 +480,8 @@ func (f *FleetServer) Run(ctx context.Context) error { } } - // Restart server - if curCfg == nil || curCfg.Inputs[0].Server != newCfg.Inputs[0].Server { + // Start or restart server + if configChangedServer(curCfg, newCfg) { stop(srvCancel, srvEg) srvEg, srvCancel = start(ctx, func(ctx context.Context) error { return f.runServer(ctx, newCfg) @@ -496,22 +499,75 @@ func (f *FleetServer) Run(ctx context.Context) error { return err case <-ctx.Done(): f.reporter.Status(proto.StateObserved_STOPPING, "Stopping", nil) - log.Info().Msg("Fleet Server exited") - return nil + break LOOP } } + + // Server is coming down; wait for the server group to exit cleanly. + // Timeout if something is locked up. 
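// (errgroup.Wait has no timeout variant of its own, so the safeWait helper
// below runs it in a goroutine and selects against time.After to keep a hung
// subsystem from blocking shutdown indefinitely.)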
+ err := safeWait(srvEg, time.Second) + + // Eat cancel error to minimize confusion in logs + if errors.Is(err, context.Canceled) { + err = nil + } + + log.Info().Err(err).Msg("Fleet Server exited") + return err +} + +func configChangedProfiler(curCfg, newCfg *config.Config) bool { + + changed := true + + switch { + case curCfg == nil: + case curCfg.Inputs[0].Server.Profiler.Enabled != newCfg.Inputs[0].Server.Profiler.Enabled: + case curCfg.Inputs[0].Server.Profiler.Bind != newCfg.Inputs[0].Server.Profiler.Bind: + default: + changed = false + } + + return changed +} + +func configChangedServer(curCfg, newCfg *config.Config) bool { + return curCfg == nil || curCfg.Inputs[0].Server != newCfg.Inputs[0].Server +} + +func safeWait(g *errgroup.Group, to time.Duration) (err error) { + waitCh := make(chan error) + go func() { + waitCh <- g.Wait() + }() + + select { + case err = <-waitCh: + case <-time.After(to): + log.Warn().Msg("deadlock: goroutine locked up on errgroup.Wait()") + err = errors.New("Group wait timeout") + } + + return } func loggedRunFunc(ctx context.Context, tag string, runfn runFunc) func() error { return func() error { + log.Debug().Msg(tag + " started") + err := runfn(ctx) - var ev *zerolog.Event - if err != nil { - log.Error().Err(err) + + lvl := zerolog.DebugLevel + switch { + case err == nil: + case errors.Is(err, context.Canceled): + err = nil + default: + lvl = zerolog.ErrorLevel } - ev = log.Debug() - ev.Msg(tag + " exited") + + log.WithLevel(lvl).Err(err).Msg(tag + " exited") return err } } @@ -528,6 +584,16 @@ func initRuntime(cfg *config.Config) { } } +func initBulker(ctx context.Context, cfg *config.Config) (*bulk.Bulker, error) { + es, err := es.NewClient(ctx, cfg, false) + if err != nil { + return nil, err + } + + blk := bulk.NewBulker(es, bulk.BulkOptsFromCfg(cfg)...) + return blk, nil +} + func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err error) { initRuntime(cfg) @@ -540,16 +606,60 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er defer metricsServer.Stop() } - // Bulker is started in its own context and managed inside of this function. This is done so - // when the `ctx` is cancelled every worker using the bulker can get everything written on - // shutdown before the bulker is then cancelled. + // Bulker is started in its own context and managed in the scope of this function. This is done so + // when the `ctx` is cancelled, the bulker will remain executing until this function exits. + // This allows the child subsystems to continue to write to the data store while tearing down. bulkCtx, bulkCancel := context.WithCancel(context.Background()) defer bulkCancel() - esCli, bulker, err := bulk.InitES(bulkCtx, cfg) + + // Create the bulker subsystem + bulker, err := initBulker(bulkCtx, cfg) if err != nil { return err } + // Execute the bulker engine in a goroutine with its orphaned context. + // Create an error channel for the case where the bulker exits + // unexpectedly (ie. not cancelled by the bulkCancel context). + errCh := make(chan error) + + go func() { + runFunc := loggedRunFunc(bulkCtx, "Bulker", bulker.Run) + + // Emit the error from bulker.Run to the local error channel. + // The error group will be listening for it. (see comments below) + errCh <- runFunc() + }() + + // Wrap context with an error group context to manage the lifecycle + // of the subsystems. An error from any subsystem, or if the + // parent context is cancelled, will cancel the group. 
+ // see https://pkg.go.dev/golang.org/x/sync/errgroup#Group.Go + g, ctx := errgroup.WithContext(ctx) + + // Stub a function for inclusion in the errgroup that exits when + // the bulker exits. If the bulker exits before the error group, + // this will tear down the error group and g.Wait() will return. + // Otherwise it will be a noop. + g.Go(func() (err error) { + select { + case err = <-errCh: + case <-ctx.Done(): + err = ctx.Err() + } + return + }) + + if err = f.runSubsystems(ctx, cfg, g, bulker); err != nil { + return err + } + + return g.Wait() +} + +func (f *FleetServer) runSubsystems(ctx context.Context, cfg *config.Config, g *errgroup.Group, bulker bulk.Bulk) (err error) { + esCli := bulker.Client() + // Check version compatibility with Elasticsearch err = ver.CheckCompatibility(ctx, esCli, f.ver) if err != nil { @@ -562,9 +672,6 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er return err } - // Replacing to errgroup context - g, ctx := errgroup.WithContext(ctx) - // Coordinator policy monitor pim, err := monitor.New(dl.FleetPolicies, esCli, monCli, monitor.WithFetchSize(cfg.Inputs[0].Monitor.FetchSize), @@ -626,7 +733,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er return runServer(ctx, router, &cfg.Inputs[0].Server) })) - return g.Wait() + return err } // Reload reloads the fleet server with the latest configuration. diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index a0e57503c..2f03b34e9 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -98,7 +98,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve } ln = wrapConnLimitter(ctx, ln, cfg) - if err := server.Serve(ln); err != nil && err != context.Canceled { + if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { return err } diff --git a/internal/pkg/apikey/apikey_integration_test.go b/internal/pkg/apikey/apikey_integration_test.go index f629f535c..bf2815105 100644 --- a/internal/pkg/apikey/apikey_integration_test.go +++ b/internal/pkg/apikey/apikey_integration_test.go @@ -11,8 +11,7 @@ import ( "errors" "testing" - ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" - + "github.com/elastic/go-elasticsearch/v7" "github.com/gofrs/uuid" "github.com/google/go-cmp/cmp" ) @@ -34,19 +33,27 @@ func TestCreateApiKeyWithMetadata(t *testing.T) { ctx, cn := context.WithCancel(context.Background()) defer cn() - bulker := ftesting.SetupBulk(ctx, t) + cfg := elasticsearch.Config{ + Username: "elastic", + Password: "changeme", + } + + es, err := elasticsearch.NewClient(cfg) + if err != nil { + t.Fatal(err) + } // Create the key agentId := uuid.Must(uuid.NewV4()).String() name := uuid.Must(uuid.NewV4()).String() - akey, err := Create(ctx, bulker.Client(), name, "", []byte(testFleetRoles), + akey, err := Create(ctx, es, name, "", []byte(testFleetRoles), NewMetadata(agentId, TypeAccess)) if err != nil { t.Fatal(err) } // Get the key and verify that metadata was saved correctly - aKeyMeta, err := Get(ctx, bulker.Client(), akey.Id) + aKeyMeta, err := Read(ctx, es, akey.Id) if err != nil { t.Fatal(err) } @@ -72,7 +79,7 @@ func TestCreateApiKeyWithMetadata(t *testing.T) { } // Try to get the key that doesn't exists, expect ErrApiKeyNotFound - aKeyMeta, err = Get(ctx, bulker.Client(), "0000000000000") + aKeyMeta, err = Read(ctx, es, "0000000000000") if !errors.Is(err, ErrApiKeyNotFound) { t.Errorf("Unexpected error type: %v", err) } diff --git a/internal/pkg/apikey/auth.go 
b/internal/pkg/apikey/auth.go index f80c32600..6306320e3 100644 --- a/internal/pkg/apikey/auth.go +++ b/internal/pkg/apikey/auth.go @@ -24,9 +24,7 @@ type SecurityInfo struct { LookupRealm map[string]string `json:"lookup_realm"` } -// Kibana: -// https://github.com/elastic/kibana/blob/master/x-pack/plugins/security/server/authentication/authenticator.ts#L308 -// NOTE: Bulk request currently not available. +// Note: Prefer the bulk wrapper on this API func (k ApiKey) Authenticate(ctx context.Context, es *elasticsearch.Client) (*SecurityInfo, error) { token := fmt.Sprintf("%s%s", authPrefix, k.Token()) diff --git a/internal/pkg/apikey/get.go b/internal/pkg/apikey/get.go index 2414ecb40..a6ed039a5 100644 --- a/internal/pkg/apikey/get.go +++ b/internal/pkg/apikey/get.go @@ -18,7 +18,7 @@ type ApiKeyMetadata struct { Metadata Metadata } -func Get(ctx context.Context, client *elasticsearch.Client, id string) (apiKey ApiKeyMetadata, err error) { +func Read(ctx context.Context, client *elasticsearch.Client, id string) (apiKey *ApiKeyMetadata, err error) { opts := []func(*esapi.SecurityGetAPIKeyRequest){ client.Security.GetAPIKey.WithContext(ctx), @@ -36,7 +36,8 @@ func Get(ctx context.Context, client *elasticsearch.Client, id string) (apiKey A defer res.Body.Close() if res.IsError() { - return apiKey, fmt.Errorf("fail GetAPIKey: %s, %w", res.String(), ErrApiKeyNotFound) + err = fmt.Errorf("fail GetAPIKey: %s, %w", res.String(), ErrApiKeyNotFound) + return } type APIKeyResponse struct { @@ -59,8 +60,10 @@ func Get(ctx context.Context, client *elasticsearch.Client, id string) (apiKey A first := resp.ApiKeys[0] - return ApiKeyMetadata{ + apiKey = &ApiKeyMetadata{ Id: first.Id, Metadata: first.Metadata, - }, nil + } + + return } diff --git a/internal/pkg/bulk/bulk_test.go b/internal/pkg/bulk/bulk_test.go index f9fe9d183..e2977605b 100644 --- a/internal/pkg/bulk/bulk_test.go +++ b/internal/pkg/bulk/bulk_test.go @@ -291,15 +291,14 @@ func benchmarkMockBulk(b *testing.B, samples [][]byte) { ctx, cancelF := context.WithCancel(context.Background()) defer cancelF() - bulker := NewBulker(mock) - n := len(samples) + bulker := NewBulker(mock, WithFlushThresholdCount(n)) var waitBulker sync.WaitGroup waitBulker.Add(1) go func() { defer waitBulker.Done() - if err := bulker.Run(ctx, WithFlushThresholdCount(n)); err != context.Canceled { + if err := bulker.Run(ctx); err != context.Canceled { b.Error(err) } }() diff --git a/internal/pkg/bulk/engine.go b/internal/pkg/bulk/engine.go index 1a766f6a8..c574ab460 100644 --- a/internal/pkg/bulk/engine.go +++ b/internal/pkg/bulk/engine.go @@ -12,7 +12,7 @@ import ( "sync" "time" - "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/go-elasticsearch/v7" @@ -21,6 +21,10 @@ import ( "golang.org/x/sync/semaphore" ) +type ApiKey = apikey.ApiKey +type SecurityInfo = apikey.SecurityInfo +type ApiKeyMetadata = apikey.ApiKeyMetadata + var ( ErrNoQuotes = errors.New("quoted literal not supported") ) @@ -47,6 +51,12 @@ type Bulk interface { MUpdate(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) MDelete(ctx context.Context, ops []MultiOp, opts ...Opt) ([]BulkIndexerResponseItem, error) + // APIKey operations + ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) + ApiKeyRead(ctx context.Context, id string) (*ApiKeyMetadata, error) + ApiKeyAuth(ctx 
context.Context, key ApiKey) (*SecurityInfo, error) + ApiKeyInvalidate(ctx context.Context, ids ...string) error + // Accessor used to talk to elastic search direcly bypassing bulk engine Client() *elasticsearch.Client } @@ -54,10 +64,11 @@ type Bulk interface { const kModBulk = "bulk" type Bulker struct { - es esapi.Transport - ch chan *bulkT - - blkPool sync.Pool + es esapi.Transport + ch chan *bulkT + opts bulkOptT + blkPool sync.Pool + apikeyLimit *semaphore.Weighted } const ( @@ -66,43 +77,23 @@ const ( defaultFlushThresholdSz = 1024 * 1024 * 10 defaultMaxPending = 32 defaultBlockQueueSz = 32 // Small capacity to allow multiOp to spin fast + defaultApiKeyMaxParallel = 32 ) -func InitES(ctx context.Context, cfg *config.Config, opts ...BulkOpt) (*elasticsearch.Client, Bulk, error) { - - es, err := es.NewClient(ctx, cfg, false) - if err != nil { - return nil, nil, err - } - - // Options specified on API should override config - nopts := []BulkOpt{ - WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval), - WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount), - WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize), - WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending), - } - nopts = append(nopts, opts...) +func NewBulker(es esapi.Transport, opts ...BulkOpt) *Bulker { - blk := NewBulker(es) - go func() { - err := blk.Run(ctx, nopts...) - log.Info().Err(err).Msg("Bulker exit") - }() - - return es, blk, nil -} - -func NewBulker(es esapi.Transport) *Bulker { + bopts := parseBulkOpts(opts...) poolFunc := func() interface{} { return &bulkT{ch: make(chan respT, 1)} } return &Bulker{ - es: es, - ch: make(chan *bulkT, defaultBlockQueueSz), - blkPool: sync.Pool{New: poolFunc}, + opts: bopts, + es: es, + ch: make(chan *bulkT, bopts.blockQueueSz), + blkPool: sync.Pool{New: poolFunc}, + apikeyLimit: semaphore.NewWeighted(int64(bopts.apikeyMaxParallel)), } } @@ -114,21 +105,6 @@ func (b *Bulker) Client() *elasticsearch.Client { return client } -func (b *Bulker) parseBulkOpts(opts ...BulkOpt) bulkOptT { - bopt := bulkOptT{ - flushInterval: defaultFlushInterval, - flushThresholdCnt: defaultFlushThresholdCnt, - flushThresholdSz: defaultFlushThresholdSz, - maxPending: defaultMaxPending, - } - - for _, f := range opts { - f(&bopt) - } - - return bopt -} - // Stop timer, but don't stall on channel. // API doesn't not seem to work as specified. func stopTimer(t *time.Timer) { @@ -163,19 +139,17 @@ func blkToQueueType(blk *bulkT) queueType { return queueIdx } -func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { +func (b *Bulker) Run(ctx context.Context) error { var err error - bopts := b.parseBulkOpts(opts...) 
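// Options are parsed once in NewBulker and carried on b.opts, which is why Run
// no longer accepts variadic BulkOpt arguments.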
- - log.Info().Interface("opts", &bopts).Msg("Run bulker with options") + log.Info().Interface("opts", &b.opts).Msg("Run bulker with options") // Create timer in stopped state - timer := time.NewTimer(bopts.flushInterval) + timer := time.NewTimer(b.opts.flushInterval) stopTimer(timer) defer timer.Stop() - w := semaphore.NewWeighted(int64(bopts.maxPending)) + w := semaphore.NewWeighted(int64(b.opts.maxPending)) var queues [kNumQueues]queueT @@ -235,11 +209,11 @@ func (b *Bulker) Run(ctx context.Context, opts ...BulkOpt) error { // Start timer on first queued item if itemCnt == 1 { - timer.Reset(bopts.flushInterval) + timer.Reset(b.opts.flushInterval) } // Threshold test, short circuit timer on pending count - if itemCnt >= bopts.flushThresholdCnt || byteCnt >= bopts.flushThresholdSz { + if itemCnt >= b.opts.flushThresholdCnt || byteCnt >= b.opts.flushThresholdSz { log.Trace(). Str("mod", kModBulk). Int("itemCnt", itemCnt). diff --git a/internal/pkg/bulk/opApiKey.go b/internal/pkg/bulk/opApiKey.go new file mode 100644 index 000000000..690dcc895 --- /dev/null +++ b/internal/pkg/bulk/opApiKey.go @@ -0,0 +1,51 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package bulk + +import ( + "context" + + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" +) + +// The ApiKey API's are not yet bulk enabled. Stub the calls in the bulker +// and limit parallel access to prevent many requests from overloading +// the connection pool in the elastic search client. + +func (b *Bulker) ApiKeyAuth(ctx context.Context, key ApiKey) (*SecurityInfo, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return key.Authenticate(ctx, b.Client()) +} + +func (b *Bulker) ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return apikey.Create(ctx, b.Client(), name, ttl, roles, meta) +} + +func (b *Bulker) ApiKeyRead(ctx context.Context, id string) (*ApiKeyMetadata, error) { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return nil, err + } + defer b.apikeyLimit.Release(1) + + return apikey.Read(ctx, b.Client(), id) +} + +func (b *Bulker) ApiKeyInvalidate(ctx context.Context, ids ...string) error { + if err := b.apikeyLimit.Acquire(ctx, 1); err != nil { + return err + } + defer b.apikeyLimit.Release(1) + + return apikey.Invalidate(ctx, b.Client(), ids...) +} diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index f1925390a..30b1f8d62 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -8,6 +8,8 @@ import ( "github.com/rs/zerolog" "strconv" "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/config" ) //----- @@ -48,6 +50,8 @@ type bulkOptT struct { flushThresholdCnt int flushThresholdSz int maxPending int + blockQueueSz int + apikeyMaxParallel int } type BulkOpt func(*bulkOptT) @@ -80,9 +84,54 @@ func WithMaxPending(max int) BulkOpt { } } +// Size of internal block queue (ie. 
channel) +func WithBlockQueueSize(sz int) BulkOpt { + return func(opt *bulkOptT) { + opt.blockQueueSz = sz + } +} + +// Max number of api key operations outstanding +func WithApiKeyMaxParallel(max int) BulkOpt { + return func(opt *bulkOptT) { + opt.apikeyMaxParallel = max + } +} + +func parseBulkOpts(opts ...BulkOpt) bulkOptT { + bopt := bulkOptT{ + flushInterval: defaultFlushInterval, + flushThresholdCnt: defaultFlushThresholdCnt, + flushThresholdSz: defaultFlushThresholdSz, + maxPending: defaultMaxPending, + apikeyMaxParallel: defaultApiKeyMaxParallel, + blockQueueSz: defaultBlockQueueSz, + } + + for _, f := range opts { + f(&bopt) + } + + return bopt +} + func (o *bulkOptT) MarshalZerologObject(e *zerolog.Event) { e.Dur("flushInterval", o.flushInterval) e.Int("flushThresholdCnt", o.flushThresholdCnt) e.Int("flushThresholdSz", o.flushThresholdSz) e.Int("maxPending", o.maxPending) + e.Int("blockQueueSz", o.blockQueueSz) + e.Int("apikeyMaxParallel", o.apikeyMaxParallel) +} + +// Bridge to configuration subsystem +func BulkOptsFromCfg(cfg *config.Config) []BulkOpt { + + return []BulkOpt{ + WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval), + WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount), + WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize), + WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending), + WithApiKeyMaxParallel(cfg.Output.Elasticsearch.MaxConnPerHost - cfg.Output.Elasticsearch.BulkFlushMaxPending), + } } diff --git a/internal/pkg/bulk/setup_test.go b/internal/pkg/bulk/setup_test.go index 3c0f4504b..ff94da5ee 100644 --- a/internal/pkg/bulk/setup_test.go +++ b/internal/pkg/bulk/setup_test.go @@ -115,10 +115,17 @@ func init() { func SetupBulk(ctx context.Context, t testing.TB, opts ...BulkOpt) Bulk { t.Helper() - _, bulker, err := InitES(ctx, &defaultCfg, opts...) + + cli, err := es.NewClient(ctx, &defaultCfg, false) if err != nil { t.Fatal(err) } + + opts = append(opts, BulkOptsFromCfg(&defaultCfg)...) + + bulker := NewBulker(cli, opts...) 
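// The bulker runs in the background for the lifetime of the test context;
// cancelling ctx stops it.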
+ go bulker.Run(ctx) + return bulker } diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index 0b1e7b48b..b1c53b5cd 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -205,6 +205,9 @@ func (m *simpleMonitorT) loadCheckpoint() sqn.SeqNo { func (m *simpleMonitorT) Run(ctx context.Context) (err error) { m.log.Info().Msg("start") defer func() { + if err == context.Canceled { + err = nil + } m.log.Info().Err(err).Msg("exited") }() @@ -242,6 +245,8 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { // Timed out, wait again m.log.Debug().Msg("timeout on global checkpoints advance, poll again") continue + } else if errors.Is(err, context.Canceled) { + m.log.Info().Msg("context closed waiting for global checkpoints advance") } else { // Log the error and keep trying m.log.Info().Err(err).Msg("failed on waiting for global checkpoints advance") diff --git a/internal/pkg/testing/bulk.go b/internal/pkg/testing/bulk.go index 5da6febdd..c39bddded 100644 --- a/internal/pkg/testing/bulk.go +++ b/internal/pkg/testing/bulk.go @@ -70,4 +70,20 @@ func (m MockBulk) Client() *elasticsearch.Client { return nil } +func (m MockBulk) ApiKeyCreate(ctx context.Context, name, ttl string, roles []byte, meta interface{}) (*bulk.ApiKey, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyRead(ctx context.Context, id string) (*bulk.ApiKeyMetadata, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyAuth(ctx context.Context, key bulk.ApiKey) (*bulk.SecurityInfo, error) { + return nil, nil +} + +func (m MockBulk) ApiKeyInvalidate(ctx context.Context, ids ...string) error { + return nil +} + var _ bulk.Bulk = (*MockBulk)(nil) diff --git a/internal/pkg/testing/setup.go b/internal/pkg/testing/setup.go index 03acdc368..249aed8e7 100644 --- a/internal/pkg/testing/setup.go +++ b/internal/pkg/testing/setup.go @@ -10,11 +10,13 @@ import ( "context" "testing" + "github.com/elastic/go-elasticsearch/v7" "github.com/elastic/go-ucfg/yaml" "github.com/rs/xid" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" ) @@ -41,12 +43,24 @@ func init() { } } -func SetupBulk(ctx context.Context, t *testing.T, opts ...bulk.BulkOpt) bulk.Bulk { +func SetupES(ctx context.Context, t *testing.T) *elasticsearch.Client { t.Helper() - _, bulker, err := bulk.InitES(ctx, &defaultCfg, opts...) + + cli, err := es.NewClient(ctx, &defaultCfg, false) if err != nil { t.Fatal(err) } + + return cli +} + +func SetupBulk(ctx context.Context, t *testing.T, opts ...bulk.BulkOpt) bulk.Bulk { + t.Helper() + + cli := SetupES(ctx, t) + opts = append(opts, bulk.BulkOptsFromCfg(&defaultCfg)...) + bulker := bulk.NewBulker(cli, opts...) + go bulker.Run(ctx) return bulker } From bf2b232bc00839504bde18f33c424612507f29e2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 8 Jun 2021 10:24:36 +0000 Subject: [PATCH 116/240] Tweak server timeouts. 
Limit body size to defend malicious agent (#441) (cherry picked from commit ad69880648b199dc060d29f2ebc4134bd66279e3) Co-authored-by: Sean Cunningham Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- cmd/fleet/error.go | 10 ++++++++++ cmd/fleet/handleAck.go | 11 +++++++++- cmd/fleet/handleCheckin.go | 11 +++++++--- cmd/fleet/handleEnroll.go | 16 +++++++++++---- cmd/fleet/server.go | 20 +++++++++++-------- internal/pkg/config/config_test.go | 32 ++++++++++++++++++++++++------ internal/pkg/config/input.go | 31 ++++++++++++++++++++++++++++- internal/pkg/config/limits.go | 4 ++++ 8 files changed, 112 insertions(+), 23 deletions(-) diff --git a/cmd/fleet/error.go b/cmd/fleet/error.go index 5f92fe15e..201b53b25 100644 --- a/cmd/fleet/error.go +++ b/cmd/fleet/error.go @@ -8,6 +8,7 @@ import ( "context" "encoding/json" "net/http" + "os" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/limit" @@ -119,6 +120,15 @@ func NewErrorResp(err error) errResp { zerolog.DebugLevel, }, }, + { + os.ErrDeadlineExceeded, + errResp{ + http.StatusRequestTimeout, + "RequestTimeout", + "timeout on request", + zerolog.InfoLevel, + }, + }, } for _, e := range errTable { diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 3e0d5b11b..875757a86 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -32,6 +32,7 @@ import ( var ErrEventAgentIdMismatch = errors.New("event agentId mismatch") type AckT struct { + cfg *config.Server limit *limit.Limiter bulk bulk.Bulk cache cache.Cache @@ -43,6 +44,7 @@ func NewAckT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *AckT { Msg("Ack install limits") return &AckT{ + cfg: cfg, bulk: bulker, cache: cache, limit: limit.NewLimiter(&cfg.Limits.AckLimit), @@ -89,7 +91,14 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er dfunc := cntAcks.IncStart() defer dfunc() - raw, err := ioutil.ReadAll(r.Body) + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if ack.cfg.Limits.AckLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, ack.cfg.Limits.AckLimit.MaxBody) + } + + raw, err := ioutil.ReadAll(body) if err != nil { return errors.Wrap(err, "handleAcks read body") } diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index b490f500f..c64036b3e 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -153,8 +153,14 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st ctx := r.Context() - // Interpret request; TODO: defend overflow, slow roll - readCounter := datacounter.NewReaderCounter(r.Body) + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if ct.cfg.Limits.CheckinLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, ct.cfg.Limits.CheckinLimit.MaxBody) + } + + readCounter := datacounter.NewReaderCounter(body) var req CheckinRequest decoder := json.NewDecoder(readCounter) @@ -181,7 +187,6 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st Str("reqId", reqId). Str("status", req.Status). Str("seqNo", seqno.String()). - RawJSON("meta", rawMeta). Uint64("bodyCount", readCounter.Count()). 
Msg("checkin start long poll") diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 2fe739cd0..3ae8d961f 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -49,6 +49,7 @@ var ( type EnrollerT struct { verCon version.Constraints + cfg *config.Server bulker bulk.Bulk cache cache.Cache limit *limit.Limiter @@ -62,6 +63,7 @@ func NewEnrollerT(verCon version.Constraints, cfg *config.Server, bulker bulk.Bu return &EnrollerT{ verCon: verCon, + cfg: cfg, limit: limit.NewLimiter(&cfg.Limits.EnrollLimit), bulker: bulker, cache: c, @@ -78,7 +80,7 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou return } - enrollResponse, err := rt.et.handleEnroll(r) + enrollResponse, err := rt.et.handleEnroll(w, r) var data []byte if err == nil { @@ -124,7 +126,7 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou Msg("success enroll") } -func (et *EnrollerT) handleEnroll(r *http.Request) (*EnrollResponse, error) { +func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*EnrollResponse, error) { limitF, err := et.limit.Acquire() if err != nil { return nil, err @@ -151,7 +153,14 @@ func (et *EnrollerT) handleEnroll(r *http.Request) (*EnrollResponse, error) { return nil, err } - readCounter := datacounter.NewReaderCounter(r.Body) + body := r.Body + + // Limit the size of the body to prevent malicious agent from exhausting RAM in server + if et.cfg.Limits.EnrollLimit.MaxBody > 0 { + body = http.MaxBytesReader(w, body, et.cfg.Limits.EnrollLimit.MaxBody) + } + + readCounter := datacounter.NewReaderCounter(body) // Parse the request body req, err := decodeEnrollRequest(readCounter) @@ -337,7 +346,6 @@ func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (* func decodeEnrollRequest(data io.Reader) (*EnrollRequest, error) { - // TODO: defend overflow, slow roll var req EnrollRequest decoder := json.NewDecoder(data) if err := decoder.Decode(&req); err != nil { diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 2f03b34e9..bc2ee3de1 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -43,6 +43,8 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve addr := cfg.BindAddress() rdto := cfg.Timeouts.Read wrto := cfg.Timeouts.Write + idle := cfg.Timeouts.Idle + rdhr := cfg.Timeouts.ReadHeader mhbz := cfg.Limits.MaxHeaderByteSize bctx := func(net.Listener) context.Context { return ctx } @@ -53,14 +55,16 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve Msg("server listening") server := http.Server{ - Addr: addr, - ReadTimeout: rdto, - WriteTimeout: wrto, - Handler: router, - BaseContext: bctx, - ConnState: diagConn, - MaxHeaderBytes: mhbz, - ErrorLog: errLogger(), + Addr: addr, + ReadTimeout: rdto, + WriteTimeout: wrto, + IdleTimeout: idle, + ReadHeaderTimeout: rdhr, + Handler: router, + BaseContext: bctx, + ConnState: diagConn, + MaxHeaderBytes: mhbz, + ErrorLog: errLogger(), } forceCh := make(chan struct{}) diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index c7c00bad3..7865f0f18 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -52,8 +52,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 60 * time.Second, + ReadHeader: 5 * time.Second, + Idle: 30 * time.Second, + Write: 10 * time.Minute, 
CheckinTimestamp: 30 * time.Second, CheckinLongPoll: 5 * time.Minute, }, @@ -70,6 +72,7 @@ func TestConfig(t *testing.T) { CheckinLimit: Limit{ Interval: time.Millisecond, Burst: 1000, + MaxBody: 1048576, }, ArtifactLimit: Limit{ Interval: time.Millisecond * 5, @@ -80,11 +83,13 @@ func TestConfig(t *testing.T) { Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 524288, }, AckLimit: Limit{ Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 2097152, }, }, }, @@ -142,8 +147,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 60 * time.Second, + ReadHeader: 5 * time.Second, + Idle: 30 * time.Second, + Write: 10 * time.Minute, CheckinTimestamp: 30 * time.Second, CheckinLongPoll: 5 * time.Minute, }, @@ -160,6 +167,7 @@ func TestConfig(t *testing.T) { CheckinLimit: Limit{ Interval: time.Millisecond, Burst: 1000, + MaxBody: 1048576, }, ArtifactLimit: Limit{ Interval: time.Millisecond * 5, @@ -170,11 +178,13 @@ func TestConfig(t *testing.T) { Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 524288, }, AckLimit: Limit{ Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 2097152, }, }, }, @@ -230,8 +240,10 @@ func TestConfig(t *testing.T) { Host: kDefaultHost, Port: kDefaultPort, Timeouts: ServerTimeouts{ - Read: 5 * time.Second, - Write: 60 * 10 * time.Second, + Read: 60 * time.Second, + ReadHeader: 5 * time.Second, + Idle: 30 * time.Second, + Write: 10 * time.Minute, CheckinTimestamp: 30 * time.Second, CheckinLongPoll: 5 * time.Minute, }, @@ -248,6 +260,7 @@ func TestConfig(t *testing.T) { CheckinLimit: Limit{ Interval: time.Millisecond, Burst: 1000, + MaxBody: 1048576, }, ArtifactLimit: Limit{ Interval: time.Millisecond * 5, @@ -258,11 +271,13 @@ func TestConfig(t *testing.T) { Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 524288, }, AckLimit: Limit{ Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 2097152, }, }, }, @@ -319,6 +334,8 @@ func TestConfig(t *testing.T) { Port: 8888, Timeouts: ServerTimeouts{ Read: 20 * time.Second, + ReadHeader: 5 * time.Second, + Idle: 30 * time.Second, Write: 5 * time.Second, CheckinTimestamp: 30 * time.Second, CheckinLongPoll: 5 * time.Minute, @@ -336,6 +353,7 @@ func TestConfig(t *testing.T) { CheckinLimit: Limit{ Interval: time.Millisecond, Burst: 1000, + MaxBody: 1048576, }, ArtifactLimit: Limit{ Interval: time.Millisecond * 5, @@ -346,11 +364,13 @@ func TestConfig(t *testing.T) { Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 524288, }, AckLimit: Limit{ Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 2097152, }, }, }, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 93d5bfd87..b7f355ecf 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -25,14 +25,43 @@ type Policy struct { type ServerTimeouts struct { Read time.Duration `config:"read"` Write time.Duration `config:"write"` + Idle time.Duration `config:"idle"` + ReadHeader time.Duration `config:"read_header"` CheckinTimestamp time.Duration `config:"checkin_timestamp"` CheckinLongPoll time.Duration `config:"checkin_long_poll"` } // InitDefaults initializes the defaults for the configuration. 
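// The defaults below follow the guidance referenced in the function body:
// generous Read/Write timeouts to tolerate slow Elasticsearch round trips and
// checkin long polls, a short ReadHeader timeout for the pre-auth phase, and a
// moderate Idle timeout so TLS connections can be reused across requests.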
func (c *ServerTimeouts) InitDefaults() { - c.Read = 5 * time.Second + // see https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/ + + // The read timeout starts on ACCEPT of the connection, and includes + // the time to read the entire body (if the body is read, otherwise to the end of the headers). + // Note that for TLS, this include the TLS handshake as well. + // In most cases, we are authenticating the apikey and doing an agent record lookup + // *before* reading the body. This is purposeful to avoid streaming data from an unauthenticated + // connection. However, the downside is that if the roundtrip to Elastic is slow, we may + // end up hitting the Read timeout before actually reading any data off the socket. + // Use a large timeout to accomodate the authentication lag. Add a ReadHeader timeout + // below to handle preAuth. + c.Read = 60 * time.Second + + // Read header timeout covers ACCEPT to the end of the HTTP headers. + // Note that for TLS, this include the TLS handshake as well. + // This is considered preauth in this server, so limit the timeout to something reasonable. + c.ReadHeader = 5 * time.Second + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. Because TLS handshakes are expensive + // for the server, avoid aggressive connection close with generous idle timeout. + c.Idle = 30 * time.Second + + // The write timeout for HTTPS covers the time from ACCEPT to the end of the response write; + // so in that case it covers the TLS handshake. If the connection is reused, the write timeout + // covers the time from the end of the request header to the end of the response write. + // Set to a very large timeout to allow for slow backend; must be at least as large as Read timeout plus Long Poll. c.Write = 10 * time.Minute + c.CheckinTimestamp = 30 * time.Second c.CheckinLongPoll = 5 * time.Minute } diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go index cef3ad525..cbe469274 100644 --- a/internal/pkg/config/limits.go +++ b/internal/pkg/config/limits.go @@ -12,6 +12,7 @@ type Limit struct { Interval time.Duration `config:"interval"` Burst int `config:"burst"` Max int64 `config:"max"` + MaxBody int64 `config:"max_body_byte_size"` } type ServerLimits struct { @@ -35,6 +36,7 @@ func (c *ServerLimits) InitDefaults() { c.CheckinLimit = Limit{ Interval: time.Millisecond, Burst: 1000, + MaxBody: 1024 * 1024, } c.ArtifactLimit = Limit{ Interval: time.Millisecond * 5, @@ -45,10 +47,12 @@ func (c *ServerLimits) InitDefaults() { Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 1024 * 512, } c.AckLimit = Limit{ Interval: time.Millisecond * 10, Burst: 100, Max: 50, + MaxBody: 1024 * 1024 * 2, } } From 4e32e27bbfcb54ee68c9aaf60c34fec77643cae4 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 9 Jun 2021 19:04:12 +0000 Subject: [PATCH 117/240] [7.x](backport #436) Update go-version (#447) * Update go-version (#436) Update go version specify go mod download all as behaviour has changed. 
(cherry picked from commit b3827765452064b0309b40d505dd793dce14593e) # Conflicts: # Makefile # README.md * Fix merge conflicts Co-authored-by: Michel Laterman <82832767+michel-laterman@users.noreply.github.com> Co-authored-by: michel-laterman --- .go-version | 2 +- Makefile | 6 +++--- README.md | 3 +-- 3 files changed, 5 insertions(+), 6 deletions(-) diff --git a/.go-version b/.go-version index a23207367..0d92a1028 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.16.4 +1.16.5 diff --git a/Makefile b/Makefile index a679760e5..2f9c96bd2 100644 --- a/Makefile +++ b/Makefile @@ -69,7 +69,7 @@ check-go: ## - Run go fmt, go vet, go mod tidy notice: ## - Generates the NOTICE.txt file. @echo "Generating NOTICE.txt" @go mod tidy - @go mod download + @go mod download all go list -m -json all | go run go.elastic.co/go-licence-detector \ -includeIndirect \ -rules dev-tools/notice/rules.json \ @@ -86,11 +86,11 @@ check-no-changes: .PHONY: test test: prepare-test-context ## - Run all tests - @$(MAKE) test-unit + @$(MAKE) test-unit @$(MAKE) test-int @$(MAKE) junit-report -.PHONY: test-unit +.PHONY: test-unit test-unit: prepare-test-context ## - Run unit tests only set -o pipefail; go test -v -race ./... | tee build/test-unit.out diff --git a/README.md b/README.md index d3131d93c..e90642c30 100644 --- a/README.md +++ b/README.md @@ -100,5 +100,4 @@ If an upgrade is done, Elasticsearch / Kibana have to be upgraded first, then El ## MacOSX Version The [golang-crossbuild](https://github.com/elastic/golang-crossbuild) produces images used for testing/building. -The `golang-crossbuild:1.16.4-darwin-debian10` image expects the minimum MacOSX version to be 10.14+. ->>>>>>> 75d8242 (Update to Go version 1.16.4 (#341)) +The `golang-crossbuild:1.16.X-darwin-debian10` images expects the minimum MacOSX version to be 10.14+. From 98927338bd7a226b30b40db04cc81bca19f98eb4 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 10 Jun 2021 01:13:41 -0400 Subject: [PATCH 118/240] [Automation] Update elastic stack version to 7.14.0-28665d9b for testing (#448) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 9725e6dab..f8cfa73dc 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-bb1ffc77-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-28665d9b-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 9a42db091c7b664c9a844bdc2e4f2e75130954b4 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 10 Jun 2021 17:21:25 +0000 Subject: [PATCH 119/240] [7.x](backport #443) Configure cache (#450) * Tweak server timeouts. Limit body size to defend malicious agent (cherry picked from commit f335936a2a699f2d6930d8a0d4971293caab79f6) * Refactor bulk init (cherry picked from commit 0cd883a5aa104c7c106ba41569d9b998335545aa) * Drop cache if configuration changes. Allow object TTL's to be configurable. Default apikey TTL to 15m to avoid auth bottleneck. 
(cherry picked from commit aadaa4c080357a8a42679d359dd09f6724fe3337) Co-authored-by: Sean Cunningham --- NOTICE.txt | 8 +- cmd/fleet/auth.go | 24 +- cmd/fleet/handleAck.go | 2 +- cmd/fleet/handleArtifacts.go | 7 +- cmd/fleet/handleCheckin.go | 2 +- cmd/fleet/handleEnroll.go | 33 +- cmd/fleet/main.go | 70 ++-- cmd/fleet/server_integration_test.go | 8 +- go.mod | 2 +- go.sum | 7 +- .../pkg/apikey/apikey_integration_test.go | 2 +- internal/pkg/apikey/create.go | 4 +- internal/pkg/bulk/opApiKey.go | 2 +- internal/pkg/cache/cache.go | 156 ++++++- internal/pkg/config/cache.go | 23 +- internal/pkg/config/config_test.go | 380 ++++-------------- 16 files changed, 347 insertions(+), 383 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index c32a251a0..af6cf67e6 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -72,11 +72,11 @@ SOFTWARE. -------------------------------------------------------------------------------- Dependency : github.com/dgraph-io/ristretto -Version: v0.0.3 +Version: v0.1.0 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.0.3/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/dgraph-io/ristretto@v0.1.0/LICENSE: Apache License Version 2.0, January 2004 @@ -35585,11 +35585,11 @@ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -------------------------------------------------------------------------------- Dependency : golang.org/x/sys -Version: v0.0.0-20200625212154-ddb9806d33ae +Version: v0.0.0-20200930185726-fdedc70b468f Licence type (autodetected): BSD-3-Clause -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20200625212154-ddb9806d33ae/LICENSE: +Contents of probable licence file $GOMODCACHE/golang.org/x/sys@v0.0.0-20200930185726-fdedc70b468f/LICENSE: Copyright (c) 2009 The Go Authors. All rights reserved. diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index 95ad1321b..ede3f0d71 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -18,13 +18,10 @@ import ( "github.com/rs/zerolog/log" ) -const ( - kAPIKeyTTL = 5 * time.Second -) - var ( ErrApiKeyNotEnabled = errors.New("APIKey not enabled") ErrAgentCorrupted = errors.New("agent record corrupted") + ErrAgentInactive = errors.New("agent inactive") ) // This authenticates that the provided API key exists and is enabled. @@ -67,9 +64,8 @@ func authApiKey(r *http.Request, bulker bulk.Bulk, c cache.Cache) (*apikey.ApiKe RawJSON("meta", info.Metadata). Msg("ApiKey authenticated") - if info.Enabled { - c.SetApiKey(*key, kAPIKeyTTL) - } else { + c.SetApiKey(*key, info.Enabled) + if !info.Enabled { err = ErrApiKeyNotEnabled log.Info(). Err(err). @@ -126,5 +122,19 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo return nil, ErrAgentCorrupted } + // validate active, an api key can be valid for an inactive agent record + // if it is in our cache and has not timed out. + if !agent.Active { + log.Info(). + Err(ErrAgentInactive). + Str("agentId", id). + Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). 
+ Msg("agent record inactive") + + // Update the cache to mark the api key id associated with this agent as not enabled + c.SetApiKey(*key, false) + return nil, ErrAgentInactive + } + return agent, nil } diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 875757a86..7048d1f23 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -158,7 +158,7 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events return errors.New("no matching action") } action = actions[0] - ack.cache.SetAction(action, time.Minute) + ack.cache.SetAction(action) } acr := model.ActionResult{ diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index c95205298..91e778945 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -31,9 +31,8 @@ import ( ) const ( - defaultMaxParallel = 8 // TODO: configurable - defaultCacheTTL = time.Hour * 24 // TODO: configurable - defaultThrottleTTL = time.Minute // TODO: configurable + defaultMaxParallel = 8 // TODO: configurable + defaultThrottleTTL = time.Minute // TODO: configurable ) var ( @@ -244,7 +243,7 @@ func (at ArtifactT) getArtifact(ctx context.Context, zlog zerolog.Logger, ident, art.Body = dstPayload // Update the cache. - at.cache.SetArtifact(*art, defaultCacheTTL) + at.cache.SetArtifact(*art) return art, nil } diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index c64036b3e..1f78cc3fc 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -432,7 +432,7 @@ func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId, reqId string, Str("newHash", defaultRole.Sha2). Msg("Generating a new API key") - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker.Client(), agent.Id, policy.DefaultOutputName, defaultRole.Raw) + defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker, agent.Id, policy.DefaultOutputName, defaultRole.Raw) if err != nil { zlog.Error().Err(err).Msg("fail generate output key") return nil, err diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 3ae8d961f..b25dcf874 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -22,7 +22,6 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" - "github.com/elastic/go-elasticsearch/v7" "github.com/gofrs/uuid" "github.com/hashicorp/go-version" "github.com/julienschmidt/httprouter" @@ -34,9 +33,6 @@ import ( const ( kEnrollMod = "enroll" - kCacheAccessInitTTL = time.Second * 30 // Cache a bit longer to handle expensive initial checkin - kCacheEnrollmentTTL = time.Second * 30 - EnrollEphemeral = "EPHEMERAL" EnrollPermanent = "PERMANENT" EnrollTemporary = "TEMPORARY" @@ -127,6 +123,7 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou } func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*EnrollResponse, error) { + limitF, err := et.limit.Acquire() if err != nil { return nil, err @@ -194,7 +191,7 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq agentId := u.String() - accessApiKey, err := generateAccessApiKey(ctx, bulker.Client(), agentId) + accessApiKey, err := generateAccessApiKey(ctx, bulker, agentId) if err != nil { return nil, err } @@ -237,7 +234,7 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } // We are Kool & and the Gang; cache the access key to avoid the roundtrip on impending checkin - 
c.SetApiKey(*accessApiKey, kCacheAccessInitTTL) + c.SetApiKey(*accessApiKey, true) return &resp, nil } @@ -311,15 +308,25 @@ func createFleetAgent(ctx context.Context, bulker bulk.Bulk, id string, agent mo return nil } -func generateAccessApiKey(ctx context.Context, client *elasticsearch.Client, agentId string) (*apikey.ApiKey, error) { - return apikey.Create(ctx, client, agentId, "", []byte(kFleetAccessRolesJSON), - apikey.NewMetadata(agentId, apikey.TypeAccess)) +func generateAccessApiKey(ctx context.Context, bulk bulk.Bulk, agentId string) (*apikey.ApiKey, error) { + return bulk.ApiKeyCreate( + ctx, + agentId, + "", + []byte(kFleetAccessRolesJSON), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) } -func generateOutputApiKey(ctx context.Context, client *elasticsearch.Client, agentId, outputName string, roles []byte) (*apikey.ApiKey, error) { +func generateOutputApiKey(ctx context.Context, bulk bulk.Bulk, agentId, outputName string, roles []byte) (*apikey.ApiKey, error) { name := fmt.Sprintf("%s:%s", agentId, outputName) - return apikey.Create(ctx, client, name, "", roles, - apikey.NewMetadata(agentId, apikey.TypeOutput)) + return bulk.ApiKeyCreate( + ctx, + name, + "", + roles, + apikey.NewMetadata(agentId, apikey.TypeOutput), + ) } func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (*model.EnrollmentApiKey, error) { @@ -339,7 +346,7 @@ func (et *EnrollerT) fetchEnrollmentKeyRecord(ctx context.Context, id string) (* } cost := int64(len(rec.ApiKey)) - et.cache.SetEnrollmentApiKey(id, rec, cost, kCacheEnrollmentTTL) + et.cache.SetEnrollmentApiKey(id, rec, cost) return &rec, nil } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 7c1e9fac0..505c33727 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -55,18 +55,23 @@ func installSignalHandler() context.Context { } func makeCache(cfg *config.Config) (cache.Cache, error) { + cacheCfg := makeCacheConfig(cfg) + log.Info().Interface("cfg", cacheCfg).Msg("makeCache") + return cache.New(cacheCfg) +} - log.Info(). - Int64("numCounters", cfg.Inputs[0].Cache.NumCounters). - Int64("maxCost", cfg.Inputs[0].Cache.MaxCost). 
- Msg("makeCache") +func makeCacheConfig(cfg *config.Config) cache.Config { + ccfg := cfg.Inputs[0].Cache - cacheCfg := cache.Config{ - NumCounters: cfg.Inputs[0].Cache.NumCounters, - MaxCost: cfg.Inputs[0].Cache.MaxCost, + return cache.Config{ + NumCounters: ccfg.NumCounters, + MaxCost: ccfg.MaxCost, + ActionTTL: ccfg.ActionTTL, + EnrollKeyTTL: ccfg.EnrollKeyTTL, + ArtifactTTL: ccfg.ArtifactTTL, + ApiKeyTTL: ccfg.ApiKeyTTL, + ApiKeyJitter: ccfg.ApiKeyJitter, } - - return cache.New(cacheCfg) } func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, error) { @@ -110,12 +115,7 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin return err } - c, err := makeCache(cfg) - if err != nil { - return err - } - - agent, err := NewAgentMode(cliCfg, os.Stdin, c, version, l) + agent, err := NewAgentMode(cliCfg, os.Stdin, version, l) if err != nil { return err } @@ -144,12 +144,7 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin return err } - c, err := makeCache(cfg) - if err != nil { - return err - } - - srv, err := NewFleetServer(cfg, c, version, status.NewLog()) + srv, err := NewFleetServer(cfg, version, status.NewLog()) if err != nil { return err } @@ -186,7 +181,6 @@ type firstCfg struct { type AgentMode struct { cliCfg *ucfg.Config - cache cache.Cache version string reloadables []reload.Reloadable @@ -201,12 +195,11 @@ type AgentMode struct { startChan chan struct{} } -func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, c cache.Cache, version string, reloadables ...reload.Reloadable) (*AgentMode, error) { +func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, version string, reloadables ...reload.Reloadable) (*AgentMode, error) { var err error a := &AgentMode{ cliCfg: cliCfg, - cache: c, version: version, reloadables: reloadables, } @@ -252,7 +245,7 @@ func (a *AgentMode) Run(ctx context.Context) error { srvCtx, srvCancel := context.WithCancel(ctx) defer srvCancel() log.Info().Msg("received initial configuration starting Fleet Server") - srv, err := NewFleetServer(cfg.cfg, a.cache, a.version, status.NewChained(status.NewLog(), a.agent)) + srv, err := NewFleetServer(cfg.cfg, a.version, status.NewChained(status.NewLog(), a.agent)) if err != nil { // unblock startChan even though there was an error a.startChan <- struct{}{} @@ -400,17 +393,23 @@ type FleetServer struct { } // NewFleetServer creates the actual fleet server service. 
-func NewFleetServer(cfg *config.Config, c cache.Cache, verStr string, reporter status.Reporter) (*FleetServer, error) { +func NewFleetServer(cfg *config.Config, verStr string, reporter status.Reporter) (*FleetServer, error) { verCon, err := buildVersionConstraint(verStr) if err != nil { return nil, err } + + cache, err := makeCache(cfg) + if err != nil { + return nil, err + } + return &FleetServer{ ver: verStr, verCon: verCon, cfg: cfg, cfgCh: make(chan *config.Config, 1), - cache: c, + cache: cache, reporter: reporter, }, nil } @@ -469,6 +468,16 @@ LOOP: f.reporter.Status(proto.StateObserved_STARTING, "Starting", nil) } + // Create or recreate cache + if configCacheChanged(curCfg, newCfg) { + cacheCfg := makeCacheConfig(newCfg) + err := f.cache.Reconfigure(cacheCfg) + log.Info().Err(err).Interface("cfg", cacheCfg).Msg("Reconfigure cache") + if err != nil { + return err + } + } + // Start or restart profiler if configChangedProfiler(curCfg, newCfg) { stop(proCancel, proEg) @@ -535,6 +544,13 @@ func configChangedServer(curCfg, newCfg *config.Config) bool { return curCfg == nil || curCfg.Inputs[0].Server != newCfg.Inputs[0].Server } +func configCacheChanged(curCfg, newCfg *config.Config) bool { + if curCfg == nil { + return false + } + return curCfg.Inputs[0].Cache != newCfg.Inputs[0].Cache +} + func safeWait(g *errgroup.Group, to time.Duration) (err error) { waitCh := make(chan error) go func() { diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 5cd779f49..2fc7072be 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -24,7 +24,6 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" - "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" @@ -63,11 +62,6 @@ func startTestServer(ctx context.Context) (*tserver, error) { return nil, err } - c, err := cache.New(cache.Config{NumCounters: 100, MaxCost: 100000}) - if err != nil { - return nil, err - } - logger.Init(cfg) port, err := ftesting.FreePort() @@ -82,7 +76,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { cfg.Inputs[0].Server = *srvcfg log.Info().Uint16("port", port).Msg("Test fleet server") - srv, err := NewFleetServer(cfg, c, serverVersion, status.NewLog()) + srv, err := NewFleetServer(cfg, serverVersion, status.NewLog()) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 46da4ce94..231b427f2 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.16 require ( github.com/Pallinder/go-randomdata v1.2.0 github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 - github.com/dgraph-io/ristretto v0.0.3 + github.com/dgraph-io/ristretto v0.1.0 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a github.com/elastic/go-elasticsearch/v7 v7.13.1 diff --git a/go.sum b/go.sum index bea793eda..c2a857491 100644 --- a/go.sum +++ b/go.sum @@ -205,8 +205,8 @@ github.com/devigned/tab v0.1.2-0.20190607222403-0c15cf42f9a2/go.mod h1:XG9mPq0dF github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b h1:mUDs72Rlzv6A4YN8w3Ra3hU9x/plOQPcQjZYL/1f5SM= github.com/dgraph-io/badger/v2 v2.2007.3-0.20201012072640-f5a7e0a1c83b/go.mod h1:26P/7fbL4kUZVEVKLAKXkBXKOydDmM2p1e+NhhnBCAE= github.com/dgraph-io/ristretto 
v0.0.3-0.20200630154024-f66de99634de/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= -github.com/dgraph-io/ristretto v0.0.3 h1:jh22xisGBjrEVnRZ1DVTpBVQm0Xndu8sMl0CWDzSIBI= -github.com/dgraph-io/ristretto v0.0.3/go.mod h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= +github.com/dgraph-io/ristretto v0.1.0 h1:Jv3CGQHp9OjuMBSne1485aDpUkTKEcUqF+jm/LuerPI= +github.com/dgraph-io/ristretto v0.1.0/go.mod h1:fux0lOrBhrVCJd3lcTHsIJhq1T2rokOu6v9Vcb3Q9ug= github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible h1:4jGdduO4ceTJFKf0IhgaB8NJapGqKHwC2b4xQ/cXujM= github.com/dgrijalva/jwt-go v3.2.1-0.20190620180102-5e25c22bd5d6+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= @@ -922,8 +922,9 @@ golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200622214017-ed371f2e16b4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae h1:Ih9Yo4hSPImZOpfGuA4bR/ORKTAbhZo2AbWNRCnevdo= golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f h1:+Nyd8tzPX9R7BWHguqsrbFdRx3WQ/1ib8I44HXV5yTA= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= diff --git a/internal/pkg/apikey/apikey_integration_test.go b/internal/pkg/apikey/apikey_integration_test.go index bf2815105..f5dbb6a1f 100644 --- a/internal/pkg/apikey/apikey_integration_test.go +++ b/internal/pkg/apikey/apikey_integration_test.go @@ -46,7 +46,7 @@ func TestCreateApiKeyWithMetadata(t *testing.T) { // Create the key agentId := uuid.Must(uuid.NewV4()).String() name := uuid.Must(uuid.NewV4()).String() - akey, err := Create(ctx, es, name, "", []byte(testFleetRoles), + akey, err := Create(ctx, es, name, "", "true", []byte(testFleetRoles), NewMetadata(agentId, TypeAccess)) if err != nil { t.Fatal(err) diff --git a/internal/pkg/apikey/create.go b/internal/pkg/apikey/create.go index dceef524c..0371934fe 100644 --- a/internal/pkg/apikey/create.go +++ b/internal/pkg/apikey/create.go @@ -14,7 +14,7 @@ import ( "github.com/elastic/go-elasticsearch/v7/esapi" ) -func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, roles []byte, meta interface{}) (*ApiKey, error) { +func Create(ctx context.Context, client *elasticsearch.Client, name, ttl, refresh string, roles []byte, meta interface{}) (*ApiKey, error) { payload := struct { Name string `json:"name,omitempty"` Expiration string `json:"expiration,omitempty"` @@ -34,7 +34,7 @@ func Create(ctx context.Context, client *elasticsearch.Client, name, ttl string, opts := []func(*esapi.SecurityCreateAPIKeyRequest){ client.Security.CreateAPIKey.WithContext(ctx), - client.Security.CreateAPIKey.WithRefresh("true"), + client.Security.CreateAPIKey.WithRefresh(refresh), } res, err := client.Security.CreateAPIKey( 
diff --git a/internal/pkg/bulk/opApiKey.go b/internal/pkg/bulk/opApiKey.go index 690dcc895..190cbe8bb 100644 --- a/internal/pkg/bulk/opApiKey.go +++ b/internal/pkg/bulk/opApiKey.go @@ -29,7 +29,7 @@ func (b *Bulker) ApiKeyCreate(ctx context.Context, name, ttl string, roles []byt } defer b.apikeyLimit.Release(1) - return apikey.Create(ctx, b.Client(), name, ttl, roles, meta) + return apikey.Create(ctx, b.Client(), name, ttl, "false", roles, meta) } func (b *Bulker) ApiKeyRead(ctx context.Context, id string) (*ApiKeyMetadata, error) { diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index 27fd27551..10a1bd10f 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -6,25 +6,61 @@ package cache import ( "fmt" + "math/rand" + "sync" "time" "github.com/dgraph-io/ristretto" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/model" ) +type Cache interface { + Reconfigure(Config) error + + SetAction(model.Action) + GetAction(id string) (model.Action, bool) + + SetApiKey(key ApiKey, enabled bool) + ValidApiKey(key ApiKey) bool + + SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64) + GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) + + SetArtifact(artifact model.Artifact) + GetArtifact(ident, sha2 string) (model.Artifact, bool) +} + type ApiKey = apikey.ApiKey type SecurityInfo = apikey.SecurityInfo -type Cache struct { +type CacheT struct { cache *ristretto.Cache + cfg Config + mut sync.RWMutex } type Config struct { - NumCounters int64 // number of keys to track frequency of - MaxCost int64 // maximum cost of cache in 'cost' units + NumCounters int64 // number of keys to track frequency of + MaxCost int64 // maximum cost of cache in 'cost' units + ActionTTL time.Duration + ApiKeyTTL time.Duration + EnrollKeyTTL time.Duration + ArtifactTTL time.Duration + ApiKeyJitter time.Duration +} + +func (c *Config) MarshalZerologObject(e *zerolog.Event) { + e.Int64("numCounters", c.NumCounters) + e.Int64("maxCost", c.MaxCost) + e.Dur("actionTTL", c.ActionTTL) + e.Dur("enrollTTL", c.EnrollKeyTTL) + e.Dur("artifactTTL", c.ArtifactTTL) + e.Dur("apiKeyTTL", c.ApiKeyTTL) + e.Dur("apiKeyJitter", c.ApiKeyJitter) } type actionCache struct { @@ -33,28 +69,64 @@ type actionCache struct { } // New creates a new cache. -func New(cfg Config) (Cache, error) { +func New(cfg Config) (*CacheT, error) { + cache, err := newCache(cfg) + if err != nil { + return nil, err + } + + c := CacheT{ + cache: cache, + cfg: cfg, + } + + return &c, nil +} + +func newCache(cfg Config) (*ristretto.Cache, error) { rcfg := &ristretto.Config{ NumCounters: cfg.NumCounters, MaxCost: cfg.MaxCost, BufferItems: 64, } - cache, err := ristretto.NewCache(rcfg) - return Cache{cache}, err + return ristretto.NewCache(rcfg) +} + +// Reconfigure will drop cache +func (c *CacheT) Reconfigure(cfg Config) error { + c.mut.Lock() + defer c.mut.Unlock() + + cache, err := newCache(cfg) + if err != nil { + return err + } + + // Close down previous cache + c.cache.Close() + + // And assign new one + c.cfg = cfg + c.cache = cache + return nil } // SetAction sets an action in the cache. // // This will only cache the action ID and action Type. So `GetAction` will only // return a `model.Action` with `ActionId` and `Type` set. 
-func (c Cache) SetAction(action model.Action, ttl time.Duration) { +func (c *CacheT) SetAction(action model.Action) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "action:" + action.ActionId v := actionCache{ actionId: action.ActionId, actionType: action.Type, } cost := len(action.ActionId) + len(action.Type) + ttl := c.cfg.ActionTTL ok := c.cache.SetWithTTL(scopedKey, v, int64(cost), ttl) log.Trace(). Bool("ok", ok). @@ -67,7 +139,10 @@ func (c Cache) SetAction(action model.Action, ttl time.Duration) { // // This will only return a `model.Action` with the action ID and action Type set. // This is because `SetAction` So `GetAction` will only cache the action ID and action Type. -func (c Cache) GetAction(id string) (model.Action, bool) { +func (c *CacheT) GetAction(id string) (model.Action, bool) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "action:" + id if v, ok := c.cache.Get(scopedKey); ok { log.Trace().Str("id", id).Msg("Action cache HIT") @@ -87,12 +162,36 @@ func (c Cache) GetAction(id string) (model.Action, bool) { } // SetApiKey sets the API key in the cache. -func (c Cache) SetApiKey(key ApiKey, ttl time.Duration) { +func (c *CacheT) SetApiKey(key ApiKey, enabled bool) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "api:" + key.Id - cost := len(scopedKey) + len(key.Key) - ok := c.cache.SetWithTTL(scopedKey, key.Key, int64(cost), ttl) + + // Use the valid key as the payload of the record; + // If caller has marked key as not enabled, use empty string. + val := key.Key + if !enabled { + val = "" + } + + // If enabled, jitter allows us to randomize the expirtion of the artifact + // across time, which is helpful if a bunch of agents came on at the same time, + // say during a network restoration. With some jitter, we avoid having to + // revalidate the API Keys all at the same time, which we know causes load on Elastic. + ttl := c.cfg.ApiKeyTTL + if c.cfg.ApiKeyJitter != 0 { + jitter := time.Duration(rand.Int63n(int64(c.cfg.ApiKeyJitter))) + if jitter < ttl { + ttl = ttl - jitter + } + } + + cost := len(scopedKey) + len(val) + ok := c.cache.SetWithTTL(scopedKey, val, int64(cost), ttl) log.Trace(). Bool("ok", ok). + Bool("enabled", enabled). Str("key", key.Id). Dur("ttl", ttl). Int("cost", cost). @@ -100,13 +199,19 @@ func (c Cache) SetApiKey(key ApiKey, ttl time.Duration) { } // ValidApiKey returns true if the ApiKey is valid (aka. also present in cache). -func (c Cache) ValidApiKey(key ApiKey) bool { +func (c *CacheT) ValidApiKey(key ApiKey) bool { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "api:" + key.Id v, ok := c.cache.Get(scopedKey) if ok { - if v == key.Key { + switch v { + case "": + log.Trace().Str("id", key.Id).Msg("ApiKey cache HIT on disabled KEY") + case key.Key: log.Trace().Str("id", key.Id).Msg("ApiKey cache HIT") - } else { + default: log.Trace().Str("id", key.Id).Msg("ApiKey cache MISMATCH") ok = false } @@ -117,7 +222,10 @@ func (c Cache) ValidApiKey(key ApiKey) bool { } // GetEnrollmentApiKey returns the enrollment API key by ID. -func (c Cache) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { +func (c *CacheT) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "record:" + id if v, ok := c.cache.Get(scopedKey); ok { log.Trace().Str("id", id).Msg("Enrollment cache HIT") @@ -135,8 +243,12 @@ func (c Cache) GetEnrollmentApiKey(id string) (model.EnrollmentApiKey, bool) { } // SetEnrollmentApiKey adds the enrollment API key into the cache. 
-func (c Cache) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64, ttl time.Duration) { +func (c *CacheT) SetEnrollmentApiKey(id string, key model.EnrollmentApiKey, cost int64) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := "record:" + id + ttl := c.cfg.EnrollKeyTTL ok := c.cache.SetWithTTL(scopedKey, key, cost, ttl) log.Trace(). Bool("ok", ok). @@ -150,7 +262,10 @@ func makeArtifactKey(ident, sha2 string) string { return fmt.Sprintf("artifact:%s:%s", ident, sha2) } -func (c Cache) GetArtifact(ident, sha2 string) (model.Artifact, bool) { +func (c *CacheT) GetArtifact(ident, sha2 string) (model.Artifact, bool) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := makeArtifactKey(ident, sha2) if v, ok := c.cache.Get(scopedKey); ok { log.Trace().Str("key", scopedKey).Msg("Artifact cache HIT") @@ -168,9 +283,14 @@ func (c Cache) GetArtifact(ident, sha2 string) (model.Artifact, bool) { } // TODO: strip body and spool to on disk cache if larger than a size threshold -func (c Cache) SetArtifact(artifact model.Artifact, ttl time.Duration) { +func (c *CacheT) SetArtifact(artifact model.Artifact) { + c.mut.RLock() + defer c.mut.RUnlock() + scopedKey := makeArtifactKey(artifact.Identifier, artifact.DecodedSha256) cost := int64(len(artifact.Body)) + ttl := c.cfg.ArtifactTTL + ok := c.cache.SetWithTTL(scopedKey, artifact, cost, ttl) log.Trace(). Bool("ok", ok). diff --git a/internal/pkg/config/cache.go b/internal/pkg/config/cache.go index b6e1a744a..72cb38939 100644 --- a/internal/pkg/config/cache.go +++ b/internal/pkg/config/cache.go @@ -4,17 +4,36 @@ package config +import ( + "time" +) + const ( defaultCacheNumCounters = 500000 // 10x times expected count defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size + defaultActionTTL = time.Minute * 5 + defaultEnrollKeyTTL = time.Minute + defaultArtifactTTL = time.Hour * 24 + defaultApiKeyTTL = time.Minute * 15 // ApiKey validation is a bottleneck. 
+ defaultApiKeyJitter = time.Minute * 5 // Jitter allows some randomness on ApiKeyTTL, zero to disable ) type Cache struct { - NumCounters int64 `config:"num_counters"` - MaxCost int64 `config:"max_cost"` + NumCounters int64 `config:"num_counters"` + MaxCost int64 `config:"max_cost"` + ActionTTL time.Duration `config:"ttl_action"` + EnrollKeyTTL time.Duration `config:"ttl_enroll_key"` + ArtifactTTL time.Duration `config:"ttl_artifact"` + ApiKeyTTL time.Duration `config:"ttl_api_key"` + ApiKeyJitter time.Duration `config:"jitter_api_key"` } func (c *Cache) InitDefaults() { c.NumCounters = defaultCacheNumCounters c.MaxCost = defaultCacheMaxCost + c.ActionTTL = defaultActionTTL + c.EnrollKeyTTL = defaultEnrollKeyTTL + c.ArtifactTTL = defaultArtifactTTL + c.ApiKeyTTL = defaultApiKeyTTL + c.ApiKeyJitter = defaultApiKeyJitter } diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 7865f0f18..9b0605f72 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -24,95 +24,23 @@ func TestConfig(t *testing.T) { }{ "basic": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{}, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - BulkFlushThresholdCount: 2048, - BulkFlushThresholdSize: 1048576, - BulkFlushMaxPending: 8, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: kDefaultHost, - Port: kDefaultPort, - Timeouts: ServerTimeouts{ - Read: 60 * time.Second, - ReadHeader: 5 * time.Second, - Idle: 30 * time.Second, - Write: 10 * time.Minute, - CheckinTimestamp: 30 * time.Second, - CheckinLongPoll: 5 * time.Minute, - }, - Profiler: ServerProfiler{ - Enabled: false, - Bind: "localhost:6060", - }, - CompressionLevel: 1, - CompressionThresh: 1024, - Limits: ServerLimits{ - MaxHeaderByteSize: 8192, - MaxConnections: 0, - PolicyThrottle: 5 * time.Millisecond, - CheckinLimit: Limit{ - Interval: time.Millisecond, - Burst: 1000, - MaxBody: 1048576, - }, - ArtifactLimit: Limit{ - Interval: time.Millisecond * 5, - Burst: 25, - Max: 50, - }, - EnrollLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 524288, - }, - AckLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 2097152, - }, - }, - }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, + Type: "fleet-server", + Server: defaultServer(), + Cache: defaultCache(), Monitor: Monitor{ FetchSize: defaultFetchSize, PollTimeout: defaultPollTimeout, }, }, }, - Logging: Logging{ - Level: "info", - ToStderr: false, - ToFiles: true, - Files: nil, - }, - HTTP: HTTP{ - Host: kDefaultHTTPHost, - Port: kDefaultHTTPPort, - }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "fleet-logging": { @@ -126,205 +54,49 @@ func TestConfig(t *testing.T) { }, }, Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - BulkFlushThresholdCount: 2048, - BulkFlushThresholdSize: 1048576, - BulkFlushMaxPending: 8, - Timeout: 90 * time.Second, - }, + 
Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: kDefaultHost, - Port: kDefaultPort, - Timeouts: ServerTimeouts{ - Read: 60 * time.Second, - ReadHeader: 5 * time.Second, - Idle: 30 * time.Second, - Write: 10 * time.Minute, - CheckinTimestamp: 30 * time.Second, - CheckinLongPoll: 5 * time.Minute, - }, - Profiler: ServerProfiler{ - Enabled: false, - Bind: "localhost:6060", - }, - CompressionLevel: 1, - CompressionThresh: 1024, - Limits: ServerLimits{ - MaxHeaderByteSize: 8192, - MaxConnections: 0, - PolicyThrottle: 5 * time.Millisecond, - CheckinLimit: Limit{ - Interval: time.Millisecond, - Burst: 1000, - MaxBody: 1048576, - }, - ArtifactLimit: Limit{ - Interval: time.Millisecond * 5, - Burst: 25, - Max: 50, - }, - EnrollLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 524288, - }, - AckLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 2097152, - }, - }, - }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, + Type: "fleet-server", + Server: defaultServer(), + Cache: defaultCache(), Monitor: Monitor{ FetchSize: defaultFetchSize, PollTimeout: defaultPollTimeout, }, }, }, - Logging: Logging{ - Level: "info", - ToStderr: false, - ToFiles: true, - Files: nil, - }, - HTTP: HTTP{ - Host: kDefaultHTTPHost, - Port: kDefaultHTTPPort, - }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "input": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{}, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - BulkFlushThresholdCount: 2048, - BulkFlushThresholdSize: 1048576, - BulkFlushMaxPending: 8, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { - Type: "fleet-server", - Server: Server{ - Host: kDefaultHost, - Port: kDefaultPort, - Timeouts: ServerTimeouts{ - Read: 60 * time.Second, - ReadHeader: 5 * time.Second, - Idle: 30 * time.Second, - Write: 10 * time.Minute, - CheckinTimestamp: 30 * time.Second, - CheckinLongPoll: 5 * time.Minute, - }, - Profiler: ServerProfiler{ - Enabled: false, - Bind: "localhost:6060", - }, - CompressionLevel: 1, - CompressionThresh: 1024, - Limits: ServerLimits{ - MaxHeaderByteSize: 8192, - MaxConnections: 0, - PolicyThrottle: 5 * time.Millisecond, - CheckinLimit: Limit{ - Interval: time.Millisecond, - Burst: 1000, - MaxBody: 1048576, - }, - ArtifactLimit: Limit{ - Interval: time.Millisecond * 5, - Burst: 25, - Max: 50, - }, - EnrollLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 524288, - }, - AckLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 2097152, - }, - }, - }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, - }, + Type: "fleet-server", + Server: defaultServer(), + Cache: defaultCache(), Monitor: Monitor{ FetchSize: defaultFetchSize, PollTimeout: defaultPollTimeout, }, }, }, - Logging: Logging{ - Level: "info", - ToStderr: false, - ToFiles: true, - Files: nil, - }, - HTTP: HTTP{ - Host: kDefaultHTTPHost, - Port: kDefaultHTTPPort, - }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "input-config": { cfg: &Config{ - Fleet: Fleet{ - Agent: Agent{ - ID: 
"1e4954ce-af37-4731-9f4a-407b08e69e42", - Logging: AgentLogging{}, - }, - }, + Fleet: defaultFleet(), Output: Output{ - Elasticsearch: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - BulkFlushThresholdCount: 2048, - BulkFlushThresholdSize: 1048576, - BulkFlushMaxPending: 8, - Timeout: 90 * time.Second, - }, + Elasticsearch: defaultElastic(), }, Inputs: []Input{ { @@ -346,54 +118,17 @@ func TestConfig(t *testing.T) { }, CompressionLevel: 1, CompressionThresh: 1024, - Limits: ServerLimits{ - MaxHeaderByteSize: 8192, - MaxConnections: 0, - PolicyThrottle: 5 * time.Millisecond, - CheckinLimit: Limit{ - Interval: time.Millisecond, - Burst: 1000, - MaxBody: 1048576, - }, - ArtifactLimit: Limit{ - Interval: time.Millisecond * 5, - Burst: 25, - Max: 50, - }, - EnrollLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 524288, - }, - AckLimit: Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 2097152, - }, - }, - }, - Cache: Cache{ - NumCounters: defaultCacheNumCounters, - MaxCost: defaultCacheMaxCost, + Limits: defaultServerLimits(), }, + Cache: defaultCache(), Monitor: Monitor{ FetchSize: defaultFetchSize, PollTimeout: defaultPollTimeout, }, }, }, - Logging: Logging{ - Level: "info", - ToStderr: false, - ToFiles: true, - Files: nil, - }, - HTTP: HTTP{ - Host: kDefaultHTTPHost, - Port: kDefaultHTTPPort, - }, + Logging: defaultLogging(), + HTTP: defaultHTTP(), }, }, "bad-input": { @@ -433,3 +168,66 @@ func TestConfig(t *testing.T) { }) } } + +// Stub out the defaults so that the above is easier to maintain + +func defaultCache() Cache { + var d Cache + d.InitDefaults() + return d +} + +func defaultServerTimeouts() ServerTimeouts { + var d ServerTimeouts + d.InitDefaults() + return d +} + +func defaultServerLimits() ServerLimits { + var d ServerLimits + d.InitDefaults() + return d +} + +func defaultLogging() Logging { + var d Logging + d.InitDefaults() + return d +} + +func defaultHTTP() HTTP { + var d HTTP + d.InitDefaults() + return d +} + +func defaultFleet() Fleet { + return Fleet{ + Agent: Agent{ + ID: "1e4954ce-af37-4731-9f4a-407b08e69e42", + Logging: AgentLogging{}, + }, + } +} + +func defaultElastic() Elasticsearch { + return Elasticsearch{ + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + BulkFlushInterval: 250 * time.Millisecond, + BulkFlushThresholdCount: 2048, + BulkFlushThresholdSize: 1048576, + BulkFlushMaxPending: 8, + Timeout: 90 * time.Second, + } +} + +func defaultServer() Server { + var d Server + d.InitDefaults() + return d +} From 21d91bce69c86b1214e072541b8ac7de9bcc1580 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 10 Jun 2021 22:44:53 +0000 Subject: [PATCH 120/240] [7.x](backport #445) Add optional jitter to long poll (#451) * Tweak server timeouts. Limit body size to defend malicious agent (cherry picked from commit 7ed2fc142c980dc416b22c05ee4129a74d453749) * Refactor bulk init (cherry picked from commit 2cc1691cf5ffe902b0e8753bbaa3bfe0aa09d363) * Add optional Jitter to long poll to help smooth out load over time. 
(cherry picked from commit 80f18ef84cb1bef32d1d7fde7fa0bd7093a56f07) Co-authored-by: Sean Cunningham Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- cmd/fleet/handleCheckin.go | 13 ++++++- internal/pkg/config/config_test.go | 1 + internal/pkg/config/input.go | 46 ---------------------- internal/pkg/config/timeouts.go | 61 ++++++++++++++++++++++++++++++ 4 files changed, 74 insertions(+), 47 deletions(-) create mode 100644 internal/pkg/config/timeouts.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 1f78cc3fc..277feeb09 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -10,6 +10,7 @@ import ( "compress/gzip" "context" "encoding/json" + "math/rand" "net/http" "reflect" "time" @@ -108,6 +109,7 @@ func NewCheckinT( Interface("limits", cfg.Limits.CheckinLimit). Dur("long_poll_timeout", cfg.Timeouts.CheckinLongPoll). Dur("long_poll_timestamp", cfg.Timeouts.CheckinTimestamp). + Dur("long_poll_jitter", cfg.Timeouts.CheckinJitter). Msg("Checkin install limits") ct := &CheckinT{ @@ -206,8 +208,17 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st tick := time.NewTicker(ct.cfg.Timeouts.CheckinTimestamp) defer tick.Stop() + pollDuration := ct.cfg.Timeouts.CheckinLongPoll + if ct.cfg.Timeouts.CheckinJitter != 0 { + jitter := time.Duration(rand.Int63n(int64(ct.cfg.Timeouts.CheckinJitter))) + if jitter < pollDuration { + pollDuration = pollDuration - jitter + log.Trace().Str("agentId", id).Dur("poll", pollDuration).Msg("Long poll with jitter") + } + } + // Chill out for for a bit. Long poll. - longPoll := time.NewTicker(ct.cfg.Timeouts.CheckinLongPoll) + longPoll := time.NewTicker(pollDuration) defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 9b0605f72..35c3a233d 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -111,6 +111,7 @@ func TestConfig(t *testing.T) { Write: 5 * time.Second, CheckinTimestamp: 30 * time.Second, CheckinLongPoll: 5 * time.Minute, + CheckinJitter: 30 * time.Second, }, Profiler: ServerProfiler{ Enabled: false, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index b7f355ecf..a9b5c284a 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -8,7 +8,6 @@ import ( "compress/flate" "fmt" "strings" - "time" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) @@ -21,51 +20,6 @@ type Policy struct { ID string `config:"id"` } -// ServerTimeouts is the configuration for the server timeouts -type ServerTimeouts struct { - Read time.Duration `config:"read"` - Write time.Duration `config:"write"` - Idle time.Duration `config:"idle"` - ReadHeader time.Duration `config:"read_header"` - CheckinTimestamp time.Duration `config:"checkin_timestamp"` - CheckinLongPoll time.Duration `config:"checkin_long_poll"` -} - -// InitDefaults initializes the defaults for the configuration. -func (c *ServerTimeouts) InitDefaults() { - // see https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/ - - // The read timeout starts on ACCEPT of the connection, and includes - // the time to read the entire body (if the body is read, otherwise to the end of the headers). - // Note that for TLS, this include the TLS handshake as well. 
- // In most cases, we are authenticating the apikey and doing an agent record lookup - // *before* reading the body. This is purposeful to avoid streaming data from an unauthenticated - // connection. However, the downside is that if the roundtrip to Elastic is slow, we may - // end up hitting the Read timeout before actually reading any data off the socket. - // Use a large timeout to accomodate the authentication lag. Add a ReadHeader timeout - // below to handle preAuth. - c.Read = 60 * time.Second - - // Read header timeout covers ACCEPT to the end of the HTTP headers. - // Note that for TLS, this include the TLS handshake as well. - // This is considered preauth in this server, so limit the timeout to something reasonable. - c.ReadHeader = 5 * time.Second - - // IdleTimeout is the maximum amount of time to wait for the - // next request when keep-alives are enabled. Because TLS handshakes are expensive - // for the server, avoid aggressive connection close with generous idle timeout. - c.Idle = 30 * time.Second - - // The write timeout for HTTPS covers the time from ACCEPT to the end of the response write; - // so in that case it covers the TLS handshake. If the connection is reused, the write timeout - // covers the time from the end of the request header to the end of the response write. - // Set to a very large timeout to allow for slow backend; must be at least as large as Read timeout plus Long Poll. - c.Write = 10 * time.Minute - - c.CheckinTimestamp = 30 * time.Second - c.CheckinLongPoll = 5 * time.Minute -} - // ServerProfiler is the configuration for profiling the server. type ServerProfiler struct { Enabled bool `config:"enabled"` diff --git a/internal/pkg/config/timeouts.go b/internal/pkg/config/timeouts.go new file mode 100644 index 000000000..1c58382ba --- /dev/null +++ b/internal/pkg/config/timeouts.go @@ -0,0 +1,61 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package config + +import ( + "time" +) + +// ServerTimeouts is the configuration for the server timeouts +type ServerTimeouts struct { + Read time.Duration `config:"read"` + Write time.Duration `config:"write"` + Idle time.Duration `config:"idle"` + ReadHeader time.Duration `config:"read_header"` + CheckinTimestamp time.Duration `config:"checkin_timestamp"` + CheckinLongPoll time.Duration `config:"checkin_long_poll"` + CheckinJitter time.Duration `config:"checkin_jitter"` +} + +// InitDefaults initializes the defaults for the configuration. +func (c *ServerTimeouts) InitDefaults() { + // see https://blog.gopheracademy.com/advent-2016/exposing-go-on-the-internet/ + + // The read timeout starts on ACCEPT of the connection, and includes + // the time to read the entire body (if the body is read, otherwise to the end of the headers). + // Note that for TLS, this include the TLS handshake as well. + // In most cases, we are authenticating the apikey and doing an agent record lookup + // *before* reading the body. This is purposeful to avoid streaming data from an unauthenticated + // connection. However, the downside is that if the roundtrip to Elastic is slow, we may + // end up hitting the Read timeout before actually reading any data off the socket. + // Use a large timeout to accomodate the authentication lag. Add a ReadHeader timeout + // below to handle preAuth. 
+ c.Read = 60 * time.Second + + // Read header timeout covers ACCEPT to the end of the HTTP headers. + // Note that for TLS, this include the TLS handshake as well. + // This is considered preauth in this server, so limit the timeout to something reasonable. + c.ReadHeader = 5 * time.Second + + // IdleTimeout is the maximum amount of time to wait for the + // next request when keep-alives are enabled. Because TLS handshakes are expensive + // for the server, avoid aggressive connection close with generous idle timeout. + c.Idle = 30 * time.Second + + // The write timeout for HTTPS covers the time from ACCEPT to the end of the response write; + // so in that case it covers the TLS handshake. If the connection is reused, the write timeout + // covers the time from the end of the request header to the end of the response write. + // Set to a very large timeout to allow for slow backend; must be at least as large as Read timeout plus Long Poll. + c.Write = 10 * time.Minute + + // Write out a timestamp to elastic on this timeout during long poll + c.CheckinTimestamp = 30 * time.Second + + // Long poll timeout, will be short-circuited on policy change + c.CheckinLongPoll = 5 * time.Minute + + // Jitter subtracted from c.CheckinLongPoll. Disabled if zero. + c.CheckinJitter = 30 * time.Second +} From 6af3e35a7daa583374aae1df37cbb265db510c5e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 15 Jun 2021 11:02:59 +0000 Subject: [PATCH 121/240] [7.x](backport #346) fix: use the right Go version in the CI (#460) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: use the right Go version in the CI (cherry picked from commit 40e6fb3a6d49defe3774eb2d95cdc7fb896492fd) * chore: replace gimme with gvm (cherry picked from commit f888d11436c4773293da64a46a9e6a9450d698c8) * chore: enforce running the tests with go version (cherry picked from commit 078b9bc3d91658bbae92577b4f577d52df34c646) # Conflicts: # Makefile * Revert "fix: use the right Go version in the CI" This reverts commit 40e6fb3a6d49defe3774eb2d95cdc7fb896492fd. 
(cherry picked from commit 087158971cd0c58ab3f6b7352dcf683276375c6b) * fix: use WithGoEnv properly If we call it within the BASE_DIR, it will automatically infer the .go-version reading it from the root dir (cherry picked from commit 21328bf142b9578aa8ca73b4c912c0259cf03aa1) * Update Makefile Co-authored-by: Manuel de la Peña Co-authored-by: Sean Cunningham --- .ci/Jenkinsfile | 12 ++++++------ Makefile | 4 ++-- dev-tools/common.bash | 25 ++++++++++++------------- 3 files changed, 20 insertions(+), 21 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 1ceac6e24..860f7ff14 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -38,8 +38,8 @@ pipeline { stage('Check') { steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ sh(label: 'check',script: 'make check') } } @@ -48,8 +48,8 @@ pipeline { stage('Local') { steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ sh(label: 'local',script: 'make local') } } @@ -58,8 +58,8 @@ pipeline { stage('Test') { steps { cleanup() - withGoEnv(){ - dir("${BASE_DIR}"){ + dir("${BASE_DIR}"){ + withGoEnv(){ retryWithSleep(retries: 2, seconds: 5, backoff: true){ sh(label: "Install Docker", script: '.ci/scripts/install-docker-compose.sh') } sh(label: 'test', script: 'make test') } diff --git a/Makefile b/Makefile index 2f9c96bd2..f1022d246 100644 --- a/Makefile +++ b/Makefile @@ -86,8 +86,8 @@ check-no-changes: .PHONY: test test: prepare-test-context ## - Run all tests - @$(MAKE) test-unit - @$(MAKE) test-int + @./dev-tools/run_with_go_ver $(MAKE) test-unit + @./dev-tools/run_with_go_ver $(MAKE) test-int @$(MAKE) junit-report .PHONY: test-unit diff --git a/dev-tools/common.bash b/dev-tools/common.bash index 9f8ac1e1e..1e5aedfeb 100644 --- a/dev-tools/common.bash +++ b/dev-tools/common.bash @@ -34,18 +34,18 @@ get_go_version() { fi } -# install_gimme -# Install gimme to HOME/bin. -install_gimme() { - # Install gimme - if [ ! -f "${HOME}/bin/gimme" ]; then - mkdir -p ${HOME}/bin - curl -sL -o ${HOME}/bin/gimme https://raw.githubusercontent.com/travis-ci/gimme/v1.1.0/gimme - chmod +x ${HOME}/bin/gimme +# install_gvm +# Install gvm to /usr/local/bin. +# To read more about installing gvm in other platforms: https://github.com/andrewkroh/gvm#installation +install_gvm() { + # Install gvm + if [ ! -f "/usr/local/bin/gvm" ]; then + curl -sL -o ~/bin/gvm https://github.com/andrewkroh/gvm/releases/download/v0.3.0/gvm-linux-amd64 + chmod +x /usr/local/bin/gvm fi - GIMME="${HOME}/bin/gimme" - debug "Gimme version $(${GIMME} version)" + GVM="/usr/local/bin/gvm" + debug "Gvm version $(${GVM} --version)" } # setup_go_root "version" @@ -55,11 +55,10 @@ install_gimme() { setup_go_root() { local version=${1} - install_gimme + install_gvm # Setup GOROOT and add go to the PATH. - ${GIMME} "${version}" > /dev/null - source "${HOME}/.gimme/envs/go${version}.env" 2> /dev/null + eval "$(${GVM} ${version})" debug "$(go version)" } From 74c4038a72f48fcb0ca694a0a074d14e2f63f8fe Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 15 Jun 2021 11:41:07 +0000 Subject: [PATCH 122/240] Remove indirect build dependency on go-license-detector. (#457) (#459) * Remove indirect build dependency on go-license-detector. The 'go tidy' command in go1.16.5 stripped the hash from go.sum breaking the dependencies-report script. Explictly install the go-license-detector@v0.4.0 script and invoke directly. * Fix paths for schema generator and license as well. 
* Properly version URL for schema-generate. (cherry picked from commit 6f636f79898c25f55136196f6dd7f7cc86bb17be) Co-authored-by: Sean Cunningham --- Makefile | 14 +++++++++----- NOTICE.txt | 31 ------------------------------- dev-tools/dependencies-report | 4 +++- go.mod | 1 - go.sum | 2 -- main.go | 3 --- 6 files changed, 12 insertions(+), 43 deletions(-) diff --git a/Makefile b/Makefile index f1022d246..5a1fc0312 100644 --- a/Makefile +++ b/Makefile @@ -23,6 +23,9 @@ LDFLAGS=-w -s -X main.Version=${VERSION} -X main.Commit=${COMMIT} CMD_COLOR_ON=\033[32m\xE2\x9c\x93 CMD_COLOR_OFF=\033[0m +# Directory to dump build tools into +GOBIN=$(shell go env GOPATH)/bin/ + .PHONY: help help: ## - Show help message @printf "${CMD_COLOR_ON} usage: make [target]\n\n${CMD_COLOR_OFF}" @@ -42,9 +45,9 @@ clean: ## - Clean up build artifacts .PHONY: generate generate: ## - Generate schema models @printf "${CMD_COLOR_ON} Installing module for go generate\n${CMD_COLOR_OFF}" - go install github.com/aleksmaus/generate/... + env GOBIN=${GOBIN} go install github.com/aleksmaus/generate/cmd/schema-generate@latest @printf "${CMD_COLOR_ON} Running go generate\n${CMD_COLOR_OFF}" - go generate ./... + env PATH=${GOBIN}:${PATH} go generate ./... .PHONY: check check: ## - Run all checks @@ -56,8 +59,8 @@ check: ## - Run all checks .PHONY: check-headers check-headers: ## - Check copyright headers - @go install github.com/elastic/go-licenser - @go-licenser -license Elastic + @env GOBIN=${GOBIN} go install github.com/elastic/go-licenser@latest + @env PATH=${GOBIN}:${PATH} go-licenser -license Elastic .PHONY: check-go check-go: ## - Run go fmt, go vet, go mod tidy @@ -70,7 +73,8 @@ notice: ## - Generates the NOTICE.txt file. @echo "Generating NOTICE.txt" @go mod tidy @go mod download all - go list -m -json all | go run go.elastic.co/go-licence-detector \ + @env GOBIN=${GOBIN} go install go.elastic.co/go-licence-detector@latest + go list -m -json all | env PATH=${GOBIN}:${PATH} go-licence-detector \ -includeIndirect \ -rules dev-tools/notice/rules.json \ -overrides dev-tools/notice/overrides.json \ diff --git a/NOTICE.txt b/NOTICE.txt index af6cf67e6..88b483a8a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -39,37 +39,6 @@ IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : github.com/aleksmaus/generate -Version: v0.0.0-20210326194607-c630e07a2742 -Licence type (autodetected): MIT --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/github.com/aleksmaus/generate@v0.0.0-20210326194607-c630e07a2742/LICENSE.txt: - -MIT License - -Copyright (c) 2017 Adrian Hesketh - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in all -copies or substantial portions of the Software. 
- -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - - -------------------------------------------------------------------------------- Dependency : github.com/dgraph-io/ristretto Version: v0.1.0 diff --git a/dev-tools/dependencies-report b/dev-tools/dependencies-report index a824d0f4b..9492a9d59 100755 --- a/dev-tools/dependencies-report +++ b/dev-tools/dependencies-report @@ -34,7 +34,9 @@ done go mod tidy go mod download -go list -m -json all $@ | go run go.elastic.co/go-licence-detector \ +GOPATH=`go env GOPATH` +env GOBIN=$GOPATH/bin/ go install go.elastic.co/go-licence-detector@v0.4.0 +go list -m -json all $@ | $GOPATH/bin/go-licence-detector \ -includeIndirect \ -rules "$SRCPATH/notice/rules.json" \ -overrides "$SRCPATH/notice/overrides.json" \ diff --git a/go.mod b/go.mod index 231b427f2..c5c0c899b 100644 --- a/go.mod +++ b/go.mod @@ -4,7 +4,6 @@ go 1.16 require ( github.com/Pallinder/go-randomdata v1.2.0 - github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 github.com/dgraph-io/ristretto v0.1.0 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a diff --git a/go.sum b/go.sum index c2a857491..516e38661 100644 --- a/go.sum +++ b/go.sum @@ -108,8 +108,6 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742 h1:lDBhj+4eBCS9tNiJLXrNbvwO5xwkn2/kjvy+tO+PWlI= -github.com/aleksmaus/generate v0.0.0-20210326194607-c630e07a2742/go.mod h1:lvlu2Ij1bLmxB8RUWyw5IQ4/JcLX60eYhLiBmvImnhk= github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20 h1:7rj9qZ63knnVo2ZeepYHvHuRdG76f3tRUTdIQDzRBeI= github.com/andrewkroh/goja v0.0.0-20190128172624-dd2ac4456e20/go.mod h1:cI59GRkC2FRaFYtgbYEqMlgnnfvAwXzjojyZKXwklNg= github.com/andrewkroh/sys v0.0.0-20151128191922-287798fe3e43 h1:WFwa9pqou0Nb4DdfBOyaBTH0GqLE74Qwdf61E7ITHwQ= diff --git a/main.go b/main.go index 8b06606a5..2f9973901 100644 --- a/main.go +++ b/main.go @@ -13,9 +13,6 @@ import ( "fmt" "os" - // Needed for the generator not to be nuked by go tidy. Fails make check otherwise. - _ "github.com/aleksmaus/generate" - "github.com/elastic/fleet-server/v7/cmd/fleet" ) From 82a821e6390cc47050d4bcc6af7d30853dab5dce Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 15 Jun 2021 17:32:14 +0000 Subject: [PATCH 123/240] Policy tests occasionally fail in jenkins. Force init to be deterministic. 
(#464) (cherry picked from commit e6e03c03ab53cf9259d08dad63e1b204fdae9616) Co-authored-by: Sean Cunningham --- internal/pkg/policy/monitor.go | 13 +++++++++++++ internal/pkg/policy/monitor_test.go | 4 ++++ internal/pkg/policy/self.go | 14 ++++++++++++++ internal/pkg/policy/self_test.go | 16 ++++++++++++++++ 4 files changed, 47 insertions(+) diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 42ad76634..49befe528 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -70,6 +70,8 @@ type monitorT struct { policyF policyFetcher policiesIndex string throttle time.Duration + + startCh chan struct{} } // Output returns a new policy that needs to be sent based on the current subscription. @@ -88,6 +90,7 @@ func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duratio throttle: throttle, policyF: dl.QueryLatestPolicies, policiesIndex: dl.FleetPolicies, + startCh: make(chan struct{}), } } @@ -98,6 +101,7 @@ func (m *monitorT) Run(ctx context.Context) error { s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) + close(m.startCh) LOOP: for { select { @@ -124,6 +128,15 @@ LOOP: return nil } +func (m *monitorT) waitStart(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + err = ctx.Err() + case <-m.startCh: + } + return +} + func (m *monitorT) process(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index 7057aaf8d..5be221c1c 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -45,6 +45,10 @@ func TestMonitor_NewPolicy(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*monitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + agentId := uuid.Must(uuid.NewV4()).String() policyId := uuid.Must(uuid.NewV4()).String() s, err := monitor.Subscribe(agentId, policyId, 0, 0) diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go index ce969407f..802676133 100644 --- a/internal/pkg/policy/self.go +++ b/internal/pkg/policy/self.go @@ -55,6 +55,8 @@ type selfMonitorT struct { policiesIndex string enrollmentTokenF enrollmentTokenFetcher checkTime time.Duration + + startCh chan struct{} } // NewSelfMonitor creates the self policy monitor. 
@@ -74,6 +76,7 @@ func NewSelfMonitor(fleet config.Fleet, bulker bulk.Bulk, monitor monitor.Monito policiesIndex: dl.FleetPolicies, enrollmentTokenF: findEnrollmentAPIKeys, checkTime: DefaultCheckTime, + startCh: make(chan struct{}), } } @@ -90,6 +93,8 @@ func (m *selfMonitorT) Run(ctx context.Context) error { cT := time.NewTimer(m.checkTime) defer cT.Stop() + close(m.startCh) + LOOP: for { select { @@ -133,6 +138,15 @@ func (m *selfMonitorT) Status() proto.StateObserved_Status { return m.status } +func (m *selfMonitorT) waitStart(ctx context.Context) (err error) { + select { + case <-ctx.Done(): + err = ctx.Err() + case <-m.startCh: + } + return +} + func (m *selfMonitorT) process(ctx context.Context) (proto.StateObserved_Status, error) { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go index 2a4dfefd2..92b07e09f 100644 --- a/internal/pkg/policy/self_test.go +++ b/internal/pkg/policy/self_test.go @@ -53,6 +53,10 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + // should be set to starting ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() @@ -161,6 +165,10 @@ func TestSelfMonitor_DefaultPolicy_Degraded(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + // should be set to starting ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() @@ -312,6 +320,10 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + // should be set to starting ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() @@ -420,6 +432,10 @@ func TestSelfMonitor_SpecificPolicy_Degraded(t *testing.T) { merr = monitor.Run(ctx) }() + if err := monitor.(*selfMonitorT).waitStart(ctx); err != nil { + t.Fatal(err) + } + // should be set to starting ftesting.Retry(t, ctx, func(ctx context.Context) error { status, msg, _ := reporter.Current() From 4ca031b883ca16dcc0cac70bf81ef7faca6e578f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Manuel=20de=20la=20Pe=C3=B1a?= Date: Fri, 18 Jun 2021 16:29:24 +0200 Subject: [PATCH 124/240] fix: resolve missed merge-conflicts --- README.md | 3 --- 1 file changed, 3 deletions(-) diff --git a/README.md b/README.md index e90642c30..9d7b57dc5 100644 --- a/README.md +++ b/README.md @@ -79,9 +79,6 @@ Replace {YOUR-IP} with the IP address of your machine. ## fleet-server repo -<<<<<<< HEAD -By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. -======= By default the above will download the most recent snapshot build for fleet-server. To use your own development build, run `make release` in the fleet-server repository, go to `build/distributions` and copy the `.tar.gz` and `sha512` file to the `data/elastic-agent-{hash}/downloads` inside the elastic-agent directory. Now you run with your own build of fleet-server. 
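The deterministic-start pattern added to the policy monitors above boils down to a channel that Run closes once it has subscribed, with tests blocking on waitStart before driving the monitor; a condensed sketch (field and method names follow the monitor.go/self.go diffs, the loop body is a stand-in):

import "context"

type monitorT struct {
	startCh chan struct{} // closed exactly once, when Run is ready
}

func (m *monitorT) Run(ctx context.Context) error {
	// ... subscribe to the underlying index monitor first ...
	close(m.startCh) // from here on, waitStart callers are unblocked
	<-ctx.Done()     // stand-in for the real processing loop
	return ctx.Err()
}

func (m *monitorT) waitStart(ctx context.Context) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-m.startCh:
		return nil
	}
}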
From 2975e6f2a35eae3873129fc80b198d9cfcad26a9 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 18 Jun 2021 14:37:36 +0000 Subject: [PATCH 125/240] [7.x](backport #351) chore: separate unit tests from integration tests in CI pipeline (#462) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * fix: rename integration tests output file (cherry picked from commit cfe51b46d53e5acf4fae56aea28eda30b3162f8c) * chore: separate unit from integration tests in pipeline (cherry picked from commit ffb8bfebb507c981f8ff1260872cff34e61303a2) # Conflicts: # .ci/Jenkinsfile * fix: resolve conflicts Co-authored-by: Manuel de la Peña Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- .ci/Jenkinsfile | 23 +++++++++++++++++++++-- Makefile | 2 +- 2 files changed, 22 insertions(+), 3 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 860f7ff14..771d61765 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -55,13 +55,32 @@ pipeline { } } } - stage('Test') { + stage('Unit Test') { + options { skipDefaultCheckout() } + steps { + cleanup() + dir("${BASE_DIR}"){ + withGoEnv(){ + sh(label: 'test', script: 'make test-unit') + sh(label: 'test', script: 'make junit-report') + } + } + } + post { + always { + junit(allowEmptyResults: true, keepLongStdio: true, testResults: "${BASE_DIR}/build/*.xml") + } + } + } + stage('Integration Test') { + options { skipDefaultCheckout() } steps { cleanup() dir("${BASE_DIR}"){ withGoEnv(){ retryWithSleep(retries: 2, seconds: 5, backoff: true){ sh(label: "Install Docker", script: '.ci/scripts/install-docker-compose.sh') } - sh(label: 'test', script: 'make test') + sh(label: 'test', script: 'make test-int') + sh(label: 'test', script: 'make junit-report') } } } diff --git a/Makefile b/Makefile index 5a1fc0312..36620167b 100644 --- a/Makefile +++ b/Makefile @@ -178,7 +178,7 @@ int-docker-stop: ## - Stop docker environment for integration tests .PHONY: test-int test-int: prepare-test-context ## - Run integration tests with full setup (slow!) 
@$(MAKE) int-docker-start - @set -o pipefail; $(MAKE) test-int-set | tee build/test-init.out + @set -o pipefail; $(MAKE) test-int-set | tee build/test-int.out @$(MAKE) int-docker-stop # Run integration tests without starting/stopping docker From 8cc004e7e3466a8bd224e076cd2bca5711937ec5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 21 Jun 2021 01:13:20 -0400 Subject: [PATCH 126/240] [Automation] Update elastic stack version to 7.14.0-52b7d996 for testing (#471) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index f8cfa73dc..1c3996965 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-28665d9b-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-52b7d996-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From ee44308b12a1675813137056ab1a389c005c36bc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 22 Jun 2021 01:15:01 -0400 Subject: [PATCH 127/240] [Automation] Update elastic stack version to 7.14.0-08bc11a0 for testing (#475) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 1c3996965..ed4c8cfe7 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-52b7d996-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-08bc11a0-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 2f48409ff1ce6f52aca8c09634dda148d653a17c Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 23 Jun 2021 00:11:35 +0000 Subject: [PATCH 128/240] Move Fleet Server specific config from elastic search section to server section. (#478) The elastic search section is an "output" section and has the side effect of being applied to all agents when changed in Kibana. The bulk section only applies to Fleet Server so should be located in the input section configuring the server. (cherry picked from commit c41c3e7c25c30f3599e42948390e6ced2b8a3157) Co-authored-by: Sean Cunningham --- example/fleet-server-100.yml | 5 +++-- internal/pkg/bulk/opt.go | 18 ++++++++++----- internal/pkg/config/config_test.go | 25 ++++++++++++--------- internal/pkg/config/input.go | 17 ++++++++++++++ internal/pkg/config/output.go | 36 ++++++++++++------------------ internal/pkg/config/output_test.go | 36 +++++++++++++----------------- 6 files changed, 77 insertions(+), 60 deletions(-) diff --git a/example/fleet-server-100.yml b/example/fleet-server-100.yml index 3a8add744..f6a1ad708 100644 --- a/example/fleet-server-100.yml +++ b/example/fleet-server-100.yml @@ -6,8 +6,6 @@ output: hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' username: '${ELASTICSEARCH_USERNAME:elastic}' password: '${ELASTICSEARCH_PASSWORD:changeme}' - bulk_flush_max_pending: 8 # Limit the number of pending ES bulk operations - bulk_flush_interval: 100ms # Flush ES bulk queues on this interval. 
fleet: agent: @@ -41,6 +39,9 @@ inputs: enabled: true key: /path/to/key.pem # To support TLS, server needs cert, key pair certificate: /path/to/cert.pem + bulk: + flush_max_pending: 8 # Limit the number of pending ES bulk operations + flush_interval: 100ms # Flush ES bulk queues on this interval. runtime: gc_percent: 20 # Force the GC to execute more frequently: see https://golang.org/pkg/runtime/debug/#SetGCPercent diff --git a/internal/pkg/bulk/opt.go b/internal/pkg/bulk/opt.go index 30b1f8d62..f7bbc4cd9 100644 --- a/internal/pkg/bulk/opt.go +++ b/internal/pkg/bulk/opt.go @@ -127,11 +127,19 @@ func (o *bulkOptT) MarshalZerologObject(e *zerolog.Event) { // Bridge to configuration subsystem func BulkOptsFromCfg(cfg *config.Config) []BulkOpt { + bulkCfg := cfg.Inputs[0].Server.Bulk + + // Attempt to slice the max number of connections to leave room for the bulk flush queues + maxKeyParallel := cfg.Output.Elasticsearch.MaxConnPerHost + if cfg.Output.Elasticsearch.MaxConnPerHost > bulkCfg.FlushMaxPending { + maxKeyParallel = cfg.Output.Elasticsearch.MaxConnPerHost - bulkCfg.FlushMaxPending + } + return []BulkOpt{ - WithFlushInterval(cfg.Output.Elasticsearch.BulkFlushInterval), - WithFlushThresholdCount(cfg.Output.Elasticsearch.BulkFlushThresholdCount), - WithFlushThresholdSize(cfg.Output.Elasticsearch.BulkFlushThresholdSize), - WithMaxPending(cfg.Output.Elasticsearch.BulkFlushMaxPending), - WithApiKeyMaxParallel(cfg.Output.Elasticsearch.MaxConnPerHost - cfg.Output.Elasticsearch.BulkFlushMaxPending), + WithFlushInterval(bulkCfg.FlushInterval), + WithFlushThresholdCount(bulkCfg.FlushThresholdCount), + WithFlushThresholdSize(bulkCfg.FlushThresholdSize), + WithMaxPending(bulkCfg.FlushMaxPending), + WithApiKeyMaxParallel(maxKeyParallel), } } diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 35c3a233d..25862e25a 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -120,6 +120,7 @@ func TestConfig(t *testing.T) { CompressionLevel: 1, CompressionThresh: 1024, Limits: defaultServerLimits(), + Bulk: defaultServerBulk(), }, Cache: defaultCache(), Monitor: Monitor{ @@ -190,6 +191,12 @@ func defaultServerLimits() ServerLimits { return d } +func defaultServerBulk() ServerBulk { + var d ServerBulk + d.InitDefaults() + return d +} + func defaultLogging() Logging { var d Logging d.InitDefaults() @@ -213,17 +220,13 @@ func defaultFleet() Fleet { func defaultElastic() Elasticsearch { return Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - BulkFlushThresholdCount: 2048, - BulkFlushThresholdSize: 1048576, - BulkFlushMaxPending: 8, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + Timeout: 90 * time.Second, } } diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index a9b5c284a..3ec438177 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -8,6 +8,7 @@ import ( "compress/flate" "fmt" "strings" + "time" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" ) @@ -38,6 +39,20 @@ type ServerTLS struct { Cert string `config:"cert"` } +type ServerBulk struct { + FlushInterval time.Duration `config:"flush_interval"` + FlushThresholdCount int `config:"flush_threshold_cnt"` + FlushThresholdSize int 
`config:"flush_threshold_size"` + FlushMaxPending int `config:"flush_max_pending"` +} + +func (c *ServerBulk) InitDefaults() { + c.FlushInterval = 250 * time.Millisecond + c.FlushThresholdCount = 2048 + c.FlushThresholdSize = 1024 * 1024 + c.FlushMaxPending = 8 +} + // Server is the configuration for the server type Server struct { Host string `config:"host"` @@ -49,6 +64,7 @@ type Server struct { CompressionThresh int `config:"compression_threshold"` Limits ServerLimits `config:"limits"` Runtime Runtime `config:"runtime"` + Bulk ServerBulk `config:"bulk"` } // InitDefaults initializes the defaults for the configuration. @@ -61,6 +77,7 @@ func (c *Server) InitDefaults() { c.Profiler.InitDefaults() c.Limits.InitDefaults() c.Runtime.InitDefaults() + c.Bulk.InitDefaults() } // BindAddress returns the binding address for the HTTP server. diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index 78e72ecf6..d7a566bd9 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -28,24 +28,20 @@ var hasScheme = regexp.MustCompile(`^([a-z][a-z0-9+\-.]*)://`) // Elasticsearch is the configuration for elasticsearch. type Elasticsearch struct { - Protocol string `config:"protocol"` - Hosts []string `config:"hosts"` - Path string `config:"path"` - Headers map[string]string `config:"headers"` - Username string `config:"username"` - Password string `config:"password"` - APIKey string `config:"api_key"` - ServiceToken string `config:"service_token"` - ProxyURL string `config:"proxy_url"` - ProxyDisable bool `config:"proxy_disable"` - TLS *tlscommon.Config `config:"ssl"` - MaxRetries int `config:"max_retries"` - MaxConnPerHost int `config:"max_conn_per_host"` - BulkFlushInterval time.Duration `config:"bulk_flush_interval"` - BulkFlushThresholdCount int `config:"bulk_flush_threshold_cnt"` - BulkFlushThresholdSize int `config:"bulk_flush_threshold_size"` - BulkFlushMaxPending int `config:"bulk_flush_max_pending"` - Timeout time.Duration `config:"timeout"` + Protocol string `config:"protocol"` + Hosts []string `config:"hosts"` + Path string `config:"path"` + Headers map[string]string `config:"headers"` + Username string `config:"username"` + Password string `config:"password"` + APIKey string `config:"api_key"` + ServiceToken string `config:"service_token"` + ProxyURL string `config:"proxy_url"` + ProxyDisable bool `config:"proxy_disable"` + TLS *tlscommon.Config `config:"ssl"` + MaxRetries int `config:"max_retries"` + MaxConnPerHost int `config:"max_conn_per_host"` + Timeout time.Duration `config:"timeout"` } // InitDefaults initializes the defaults for the configuration. @@ -55,10 +51,6 @@ func (c *Elasticsearch) InitDefaults() { c.Timeout = 90 * time.Second c.MaxRetries = 3 c.MaxConnPerHost = 128 - c.BulkFlushInterval = 250 * time.Millisecond - c.BulkFlushThresholdCount = 2048 - c.BulkFlushThresholdSize = 1024 * 1024 - c.BulkFlushMaxPending = 8 } // Validate ensures that the configuration is valid. 
diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index f4c983743..ebbed6281 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -27,14 +27,13 @@ func TestToESConfig(t *testing.T) { }{ "http": { cfg: Elasticsearch{ - Protocol: "http", - Hosts: []string{"localhost:9200"}, - Username: "elastic", - Password: "changeme", - MaxRetries: 3, - MaxConnPerHost: 128, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 90 * time.Second, + Protocol: "http", + Hosts: []string{"localhost:9200"}, + Username: "elastic", + Password: "changeme", + MaxRetries: 3, + MaxConnPerHost: 128, + Timeout: 90 * time.Second, }, result: elasticsearch.Config{ Addresses: []string{"http://localhost:9200"}, @@ -62,10 +61,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, }, result: elasticsearch.Config{ Addresses: []string{"http://localhost:9200", "http://other-host:9200"}, @@ -93,10 +91,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, TLS: &tlscommon.Config{ VerificationMode: tlscommon.VerifyNone, }, @@ -132,10 +129,9 @@ func TestToESConfig(t *testing.T) { Headers: map[string]string{ "X-Custom-Header": "Header-Value", }, - MaxRetries: 6, - MaxConnPerHost: 256, - BulkFlushInterval: 250 * time.Millisecond, - Timeout: 120 * time.Second, + MaxRetries: 6, + MaxConnPerHost: 256, + Timeout: 120 * time.Second, TLS: &tlscommon.Config{ VerificationMode: tlscommon.VerifyNone, }, From c968534afec88a22fe23ffc339e00ba13eac372f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 23 Jun 2021 01:15:22 -0400 Subject: [PATCH 129/240] [Automation] Update elastic stack version to 7.14.0-df0371f0 for testing (#480) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index ed4c8cfe7..92363d533 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-08bc11a0-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-df0371f0-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 332764f26a36c4df9307fc307708c3cebb671afb Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 23 Jun 2021 11:22:14 +0000 Subject: [PATCH 130/240] Automate the go version update (#465) (#482) (cherry picked from commit 50b2b3159b9d17de2b19abbd1de29f833f702514) Co-authored-by: Victor Martinez --- .ci/bump-go-release-version.sh | 22 ++++++++++++++++++++++ .ci/jobs/fleet-server.yml | 2 +- .mergify.yml | 13 ++++++++----- 3 files changed, 31 insertions(+), 6 deletions(-) create mode 100755 .ci/bump-go-release-version.sh diff --git a/.ci/bump-go-release-version.sh b/.ci/bump-go-release-version.sh new file mode 100755 index 000000000..f97e8b764 --- /dev/null +++ b/.ci/bump-go-release-version.sh @@ -0,0 +1,22 @@ +#!/usr/bin/env bash +# +# Given the Golang release version 
this script will bump the version. +# +# This script is executed by the automation we are putting in place +# and it requires the git add/commit commands. +# +# Parameters: +# $1 -> the Golang release version to be bumped. Mandatory. +# +set -euo pipefail +MSG="parameter missing." +GO_RELEASE_VERSION=${1:?$MSG} + +echo "Update go version ${GO_RELEASE_VERSION}" +echo "${GO_RELEASE_VERSION}" > .go-version + +git add .go-version +git diff --staged --quiet || git commit -m "[Automation] Update go release version to ${GO_RELEASE_VERSION}" +git --no-pager log -1 + +echo "You can now push and create a Pull Request" diff --git a/.ci/jobs/fleet-server.yml b/.ci/jobs/fleet-server.yml index fbb3df579..4ae16dd18 100644 --- a/.ci/jobs/fleet-server.yml +++ b/.ci/jobs/fleet-server.yml @@ -13,7 +13,7 @@ discover-pr-forks-trust: permission discover-pr-origin: merge-current discover-tags: true - head-filter-regex: '^(?!update-stack-version).*$' + head-filter-regex: '^(?!update-.*-version).*$' notification-context: 'fleet-server' repo: fleet-server repo-owner: elastic diff --git a/.mergify.yml b/.mergify.yml index 8da8ccb87..7df61f975 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -72,11 +72,14 @@ pull_request_rules: merge: method: squash strict: smart+fasttrack - - name: delete upstream branch after merging changes on dev-tools/integration/.env + - name: delete upstream branch with changes on dev-tools/integration/.env or .go-version after merging/closing it conditions: - - merged - - label=automation - - head~=^update-stack-version - - files~=^dev-tools/integration/.env$ + - or: + - merged + - closed + - and: + - label=automation + - head~=^update-.*-version + - files~=^(dev-tools/integration/.env|.go-version)$ actions: delete_head_branch: From 460816d073c6dda1ff797d3867d8510a5afcc185 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 23 Jun 2021 18:35:18 +0000 Subject: [PATCH 131/240] Add unenroller based on unenroll_timeout from policy (#472) (#483) * Add unenroller based on timeout. * Add API key invalidation, fix code review. (cherry picked from commit 35f53373d374089d633bbbd615ec6cb62f1a68d1) Co-authored-by: Blake Rouse --- Makefile | 6 +- internal/pkg/coordinator/monitor.go | 153 +++++++++++++++--- .../coordinator/monitor_integration_test.go | 143 ++++++++++++++-- internal/pkg/dl/agent.go | 48 +++++- internal/pkg/dl/agent_integration_test.go | 110 +++++++++++++ internal/pkg/dl/constants.go | 1 + internal/pkg/es/mapping.go | 6 + internal/pkg/model/schema.go | 6 + model/schema.json | 9 ++ 9 files changed, 437 insertions(+), 45 deletions(-) create mode 100644 internal/pkg/dl/agent_integration_test.go diff --git a/Makefile b/Makefile index 36620167b..4608aa41e 100644 --- a/Makefile +++ b/Makefile @@ -47,7 +47,7 @@ generate: ## - Generate schema models @printf "${CMD_COLOR_ON} Installing module for go generate\n${CMD_COLOR_OFF}" env GOBIN=${GOBIN} go install github.com/aleksmaus/generate/cmd/schema-generate@latest @printf "${CMD_COLOR_ON} Running go generate\n${CMD_COLOR_OFF}" - env PATH=${GOBIN}:${PATH} go generate ./... + env PATH="${GOBIN}:${PATH}" go generate ./... 
.PHONY: check check: ## - Run all checks @@ -60,7 +60,7 @@ check: ## - Run all checks .PHONY: check-headers check-headers: ## - Check copyright headers @env GOBIN=${GOBIN} go install github.com/elastic/go-licenser@latest - @env PATH=${GOBIN}:${PATH} go-licenser -license Elastic + @env PATH="${GOBIN}:${PATH}" go-licenser -license Elastic .PHONY: check-go check-go: ## - Run go fmt, go vet, go mod tidy @@ -74,7 +74,7 @@ notice: ## - Generates the NOTICE.txt file. @go mod tidy @go mod download all @env GOBIN=${GOBIN} go install go.elastic.co/go-licence-detector@latest - go list -m -json all | env PATH=${GOBIN}:${PATH} go-licence-detector \ + go list -m -json all | env PATH="${GOBIN}:${PATH}" go-licence-detector \ -includeIndirect \ -rules dev-tools/notice/rules.json \ -overrides dev-tools/notice/overrides.json \ diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index d36e3fd78..391ac429e 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -7,6 +7,7 @@ package coordinator import ( "context" "errors" + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "net" "os" "runtime" @@ -30,6 +31,9 @@ const ( defaultLeaderInterval = 30 * time.Second // become leader for at least 30 seconds defaultMetadataInterval = 5 * time.Minute // update metadata every 5 minutes defaultCoordinatorRestartDelay = 5 * time.Second // delay in restarting coordinator on failure + defaultUnenrollCheckInterval = 1 * time.Minute // perform unenroll timeout interval check + + unenrolledReasonTimeout = "timeout" // reason agent was unenrolled ) // Monitor monitors the leader election of policies and routes managed policies to the coordinator. @@ -39,9 +43,11 @@ type Monitor interface { } type policyT struct { - id string - cord Coordinator - canceller context.CancelFunc + id string + cord Coordinator + cordCanceller context.CancelFunc + unenrollTimeout time.Duration + unenrollCanceller context.CancelFunc } type monitorT struct { @@ -56,14 +62,16 @@ type monitorT struct { agentMetadata model.AgentMetadata hostMetadata model.HostMetadata - checkInterval time.Duration - leaderInterval time.Duration - metadataInterval time.Duration - coordRestartDelay time.Duration + checkInterval time.Duration + leaderInterval time.Duration + metadataInterval time.Duration + coordRestartDelay time.Duration + unenrollCheckInterval time.Duration serversIndex string policiesIndex string leadersIndex string + agentsIndex string policies map[string]policyT } @@ -71,20 +79,22 @@ type monitorT struct { // NewMonitor creates a new coordinator policy monitor. 
func NewMonitor(fleet config.Fleet, version string, bulker bulk.Bulk, monitor monitor.Monitor, factory Factory) Monitor { return &monitorT{ - log: log.With().Str("ctx", "policy leader manager").Logger(), - version: version, - fleet: fleet, - bulker: bulker, - monitor: monitor, - factory: factory, - checkInterval: defaultCheckInterval, - leaderInterval: defaultLeaderInterval, - metadataInterval: defaultMetadataInterval, - coordRestartDelay: defaultCoordinatorRestartDelay, - serversIndex: dl.FleetServers, - policiesIndex: dl.FleetPolicies, - leadersIndex: dl.FleetPoliciesLeader, - policies: make(map[string]policyT), + log: log.With().Str("ctx", "policy leader manager").Logger(), + version: version, + fleet: fleet, + bulker: bulker, + monitor: monitor, + factory: factory, + checkInterval: defaultCheckInterval, + leaderInterval: defaultLeaderInterval, + metadataInterval: defaultMetadataInterval, + coordRestartDelay: defaultCoordinatorRestartDelay, + unenrollCheckInterval: defaultUnenrollCheckInterval, + serversIndex: dl.FleetServers, + policiesIndex: dl.FleetPolicies, + leadersIndex: dl.FleetPoliciesLeader, + agentsIndex: dl.FleetAgents, + policies: make(map[string]policyT), } } @@ -163,6 +173,7 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { return err } } + m.rescheduleUnenroller(ctx, &p, &policy) } else { new = true } @@ -245,9 +256,9 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { if pt.cord != nil { pt.cord = nil } - if pt.canceller != nil { - pt.canceller() - pt.canceller = nil + if pt.cordCanceller != nil { + pt.cordCanceller() + pt.cordCanceller = nil } return } @@ -266,13 +277,14 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { go runCoordinator(cordCtx, cord, l, m.coordRestartDelay) go runCoordinatorOutput(cordCtx, cord, m.bulker, l, m.policiesIndex) pt.cord = cord - pt.canceller = canceller + pt.cordCanceller = canceller } else { err = pt.cord.Update(ctx, p) if err != nil { l.Err(err).Msg("failed to update coordinator") } } + m.rescheduleUnenroller(ctx, &pt, &p) }(p, pt) } for range lead { @@ -294,7 +306,7 @@ func (m *monitorT) releaseLeadership() { for _, pt := range m.policies { go func(pt policyT) { if pt.cord != nil { - pt.canceller() + pt.cordCanceller() } // uses a background context, because the context for the // monitor will be cancelled at this point in the code @@ -363,6 +375,26 @@ func (m *monitorT) getIPs() ([]string, error) { return ips, nil } +func (m *monitorT) rescheduleUnenroller(ctx context.Context, pt *policyT, p *model.Policy) { + l := m.log.With().Str(dl.FieldPolicyId, pt.id).Logger() + unenrollTimeout := time.Duration(p.UnenrollTimeout) * time.Second + if unenrollTimeout != pt.unenrollTimeout { + // unenroll timeout changed + if pt.unenrollCanceller != nil { + pt.unenrollCanceller() + pt.unenrollCanceller = nil + } + + if unenrollTimeout > 0 { + // start worker for unenrolling agents based timeout + unenrollCtx, canceller := context.WithCancel(ctx) + go runUnenroller(unenrollCtx, m.bulker, pt.id, unenrollTimeout, l, m.unenrollCheckInterval, m.agentsIndex) + pt.unenrollCanceller = canceller + } + pt.unenrollTimeout = unenrollTimeout + } +} + func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d time.Duration) { for { l.Info().Str("coordinator", cord.Name()).Msg("starting coordinator for policy") @@ -394,3 +426,72 @@ func runCoordinatorOutput(ctx context.Context, cord Coordinator, bulker bulk.Bul } } } + +func runUnenroller(ctx context.Context, bulker bulk.Bulk, 
policyId string, unenrollTimeout time.Duration, l zerolog.Logger, checkInterval time.Duration, agentsIndex string) { + t := time.NewTimer(checkInterval) + defer t.Stop() + for { + select { + case <-t.C: + if err := runUnenrollerWork(ctx, bulker, policyId, unenrollTimeout, l, agentsIndex); err != nil { + l.Err(err).Dur("unenroll_timeout", unenrollTimeout).Msg("failed to unenroll offline agents") + } + t.Reset(checkInterval) + case <-ctx.Done(): + return + } + } +} + +func runUnenrollerWork(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, l zerolog.Logger, agentsIndex string) error { + agents, err := dl.FindOfflineAgents(ctx, bulker, policyId, unenrollTimeout, dl.WithIndexName(agentsIndex)) + if err != nil { + return err + } + agentIds := make([]string, len(agents)) + for i, agent := range agents { + err = unenrollAgent(ctx, bulker, &agent, agentsIndex) + if err != nil { + return err + } + agentIds[i] = agent.Id + } + if len(agentIds) > 0 { + l.Info().Strs("agents", agentIds).Msg("marked agents unenrolled due to unenroll timeout") + } + return nil +} + +func unenrollAgent(ctx context.Context, bulker bulk.Bulk, agent *model.Agent, agentsIndex string) error { + now := time.Now().UTC().Format(time.RFC3339) + fields := bulk.UpdateFields{ + dl.FieldActive: false, + dl.FieldUnenrolledAt: now, + dl.FieldUnenrolledReason: unenrolledReasonTimeout, + dl.FieldUpdatedAt: now, + } + body, err := fields.Marshal() + if err != nil { + return err + } + apiKeys := getAPIKeyIDs(agent) + if len(apiKeys) > 0 { + err = apikey.Invalidate(ctx, bulker.Client(), apiKeys...) + if err != nil { + return err + } + } + err = bulker.Update(ctx, agentsIndex, agent.Id, body, bulk.WithRefresh()) + return err +} + +func getAPIKeyIDs(agent *model.Agent) []string { + keys := make([]string, 0, 1) + if agent.AccessApiKeyId != "" { + keys = append(keys, agent.AccessApiKeyId) + } + if agent.DefaultApiKeyId != "" { + keys = append(keys, agent.DefaultApiKeyId) + } + return keys +} diff --git a/internal/pkg/coordinator/monitor_integration_test.go b/internal/pkg/coordinator/monitor_integration_test.go index d039a8abc..782e83047 100644 --- a/internal/pkg/coordinator/monitor_integration_test.go +++ b/internal/pkg/coordinator/monitor_integration_test.go @@ -9,12 +9,16 @@ package coordinator import ( "context" "encoding/json" - "sync" + "fmt" "testing" "time" "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/sync/errgroup" + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" @@ -60,22 +64,21 @@ func TestMonitorLeadership(t *testing.T) { } // start the monitors - var wg sync.WaitGroup - wg.Add(2) - go func() { + g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { err := pim.Run(ctx) - wg.Done() if err != nil && err != context.Canceled { - t.Fatal(err) + return err } - }() - go func() { + return nil + }) + g.Go(func() error { err := pm.Run(ctx) - wg.Done() if err != nil && err != context.Canceled { - t.Fatal(err) + return err } - }() + return nil + }) // wait 500ms to ensure everything is running; then create a new policy <-time.After(500 * time.Millisecond) @@ -101,13 +104,129 @@ func TestMonitorLeadership(t *testing.T) { // stop the monitors cn() - wg.Wait() + err = g.Wait() + require.NoError(t, err) // ensure leadership was 
released ensureLeadershipReleased(bulkCtx, t, bulker, cfg, leadersIndex, policy1Id) ensureLeadershipReleased(bulkCtx, t, bulker, cfg, leadersIndex, policy2Id) } +func TestMonitorUnenroller(t *testing.T) { + parentCtx := context.Background() + bulkCtx, bulkCn := context.WithCancel(parentCtx) + defer bulkCn() + ctx, cn := context.WithCancel(parentCtx) + defer cn() + + // flush bulker on every operation + bulker := ftesting.SetupBulk(bulkCtx, t, bulk.WithFlushThresholdCount(1)) + serversIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingServer) + policiesIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicy) + leadersIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingPolicyLeader) + agentsIndex := ftesting.SetupIndex(bulkCtx, t, bulker, es.MappingAgent) + pim, err := monitor.New(policiesIndex, bulker.Client(), bulker.Client()) + require.NoError(t, err) + cfg := makeFleetConfig() + pm := NewMonitor(cfg, "1.0.0", bulker, pim, NewCoordinatorZero) + pm.(*monitorT).serversIndex = serversIndex + pm.(*monitorT).leadersIndex = leadersIndex + pm.(*monitorT).policiesIndex = policiesIndex + pm.(*monitorT).agentsIndex = agentsIndex + pm.(*monitorT).unenrollCheckInterval = 10 * time.Millisecond // very fast check interval for test + + // add policy with unenroll timeout + policy1Id := uuid.Must(uuid.NewV4()).String() + policy1 := model.Policy{ + PolicyId: policy1Id, + CoordinatorIdx: 0, + Data: []byte("{}"), + RevisionIdx: 1, + UnenrollTimeout: 300, // 5 minutes (300 seconds) + } + _, err = dl.CreatePolicy(ctx, bulker, policy1, dl.WithIndexName(policiesIndex)) + require.NoError(t, err) + + // create apikeys that should be invalidated + agentId := uuid.Must(uuid.NewV4()).String() + accessKey, err := bulker.ApiKeyCreate( + ctx, + agentId, + "", + []byte(""), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) + require.NoError(t, err) + outputKey, err := bulker.ApiKeyCreate( + ctx, + agentId, + "", + []byte(""), + apikey.NewMetadata(agentId, apikey.TypeAccess), + ) + require.NoError(t, err) + + // add agent that should be unenrolled + sixAgo := time.Now().UTC().Add(-6 * time.Minute) + agentBody, err := json.Marshal(model.Agent{ + AccessApiKeyId: accessKey.Id, + DefaultApiKeyId: outputKey.Id, + Active: true, + EnrolledAt: sixAgo.Format(time.RFC3339), + LastCheckin: sixAgo.Format(time.RFC3339), + PolicyId: policy1Id, + UpdatedAt: sixAgo.Format(time.RFC3339), + }) + _, err = bulker.Create(ctx, agentsIndex, agentId, agentBody) + require.NoError(t, err) + + // start the monitors + g, _ := errgroup.WithContext(context.Background()) + g.Go(func() error { + err := pim.Run(ctx) + if err != nil && err != context.Canceled { + return err + } + return nil + }) + g.Go(func() error { + err := pm.Run(ctx) + if err != nil && err != context.Canceled { + return err + } + return nil + }) + + // should set the agent to not active (aka. 
unenrolled) + ftesting.Retry(t, ctx, func(ctx context.Context) error { + agent, err := dl.FindAgent(bulkCtx, bulker, dl.QueryAgentByID, dl.FieldId, agentId, dl.WithIndexName(agentsIndex)) + if err != nil { + return err + } + if agent.Active { + return fmt.Errorf("agent %s is still active", agentId) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(50)) + + // stop the monitors + cn() + err = g.Wait() + require.NoError(t, err) + + // check other fields now we know its marked unactive + agent, err := dl.FindAgent(bulkCtx, bulker, dl.QueryAgentByID, dl.FieldId, agentId, dl.WithIndexName(agentsIndex)) + require.NoError(t, err) + assert.NotEmpty(t, agent.UnenrolledAt) + assert.Equal(t, unenrolledReasonTimeout, agent.UnenrolledReason) + + // should error as they are now invalidated + _, err = bulker.ApiKeyAuth(bulkCtx, *accessKey) + assert.Error(t, err) + _, err = bulker.ApiKeyAuth(bulkCtx, *outputKey) + assert.Error(t, err) +} + func makeFleetConfig() config.Fleet { id := uuid.Must(uuid.NewV4()).String() return config.Fleet{ diff --git a/internal/pkg/dl/agent.go b/internal/pkg/dl/agent.go index fd802d197..7ec07cc53 100644 --- a/internal/pkg/dl/agent.go +++ b/internal/pkg/dl/agent.go @@ -6,6 +6,7 @@ package dl import ( "context" + "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" @@ -17,8 +18,9 @@ const ( ) var ( - QueryAgentByAssessAPIKeyID = prepareAgentFindByAccessAPIKeyID() - QueryAgentByID = prepareAgentFindByID() + QueryAgentByAssessAPIKeyID = prepareAgentFindByAccessAPIKeyID() + QueryAgentByID = prepareAgentFindByID() + QueryOfflineAgentsByPolicyID = prepareOfflineAgentsByPolicyID() ) func prepareAgentFindByID() *dsl.Tmpl { @@ -33,8 +35,22 @@ func prepareAgentFindByField(field string) *dsl.Tmpl { return prepareFindByField(field, map[string]interface{}{"version": true}) } -func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name string, v interface{}) (agent model.Agent, err error) { - res, err := SearchWithOneParam(ctx, bulker, tmpl, FleetAgents, name, v) +func prepareOfflineAgentsByPolicyID() *dsl.Tmpl { + tmpl := dsl.NewTmpl() + + root := dsl.NewRoot() + filter := root.Query().Bool().Filter() + filter.Term(FieldActive, true, nil) + filter.Term(FieldPolicyId, tmpl.Bind(FieldPolicyId), nil) + filter.Range(FieldLastCheckin, dsl.WithRangeLTE(tmpl.Bind(FieldLastCheckin))) + + tmpl.MustResolve(root) + return tmpl +} + +func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name string, v interface{}, opt ...Option) (agent model.Agent, err error) { + o := newOption(FleetAgents, opt...) + res, err := SearchWithOneParam(ctx, bulker, tmpl, o.indexName, name, v) if err != nil { return } @@ -46,3 +62,27 @@ func FindAgent(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, name strin err = res.Hits[0].Unmarshal(&agent) return agent, err } + +func FindOfflineAgents(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, opt ...Option) ([]model.Agent, error) { + o := newOption(FleetAgents, opt...) 
+ past := time.Now().UTC().Add(-unenrollTimeout).Format(time.RFC3339) + res, err := Search(ctx, bulker, QueryOfflineAgentsByPolicyID, o.indexName, map[string]interface{}{ + FieldPolicyId: policyId, + FieldLastCheckin: past, + }) + if err != nil { + return nil, err + } + + if len(res.Hits) == 0 { + return nil, nil + } + + agents := make([]model.Agent, len(res.Hits)) + for i, hit := range res.Hits { + if err := hit.Unmarshal(&agents[i]); err != nil { + return nil, err + } + } + return agents, nil +} diff --git a/internal/pkg/dl/agent_integration_test.go b/internal/pkg/dl/agent_integration_test.go new file mode 100644 index 000000000..dff60492f --- /dev/null +++ b/internal/pkg/dl/agent_integration_test.go @@ -0,0 +1,110 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build integration + +package dl + +import ( + "context" + "encoding/json" + "testing" + "time" + + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" +) + +func TestFindOfflineAgents(t *testing.T) { + ctx, cn := context.WithCancel(context.Background()) + defer cn() + + index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingAgent) + + now := time.Now().UTC() + nowStr := now.Format(time.RFC3339) + + policyID := uuid.Must(uuid.NewV4()).String() + dayOld := now.Add(-24 * time.Hour).Format(time.RFC3339) + dayOldID := uuid.Must(uuid.NewV4()).String() + body, err := json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: dayOld, + LastCheckinStatus: "", + UpdatedAt: dayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, dayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + twoDayOld := now.Add(-48 * time.Hour).Format(time.RFC3339) + twoDayOldID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: twoDayOld, + LastCheckinStatus: "", + UpdatedAt: twoDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, twoDayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + // not active (should not be included) + notActiveID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: false, + LastCheckin: twoDayOld, + LastCheckinStatus: "", + UpdatedAt: twoDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, notActiveID, body, bulk.WithRefresh()) + require.NoError(t, err) + + threeDayOld := now.Add(-48 * time.Hour).Format(time.RFC3339) + threeDayOldID := uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: policyID, + Active: true, + LastCheckin: threeDayOld, + LastCheckinStatus: "", + UpdatedAt: threeDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, threeDayOldID, body, bulk.WithRefresh()) + require.NoError(t, err) + + // add agent on a different policy; should not be returned (3 days old) + otherPolicyID := uuid.Must(uuid.NewV4()).String() + otherID := 
uuid.Must(uuid.NewV4()).String() + body, err = json.Marshal(model.Agent{ + PolicyId: otherPolicyID, + Active: true, + LastCheckin: threeDayOld, + LastCheckinStatus: "", + UpdatedAt: threeDayOld, + EnrolledAt: nowStr, + }) + require.NoError(t, err) + _, err = bulker.Create(ctx, index, otherID, body, bulk.WithRefresh()) + require.NoError(t, err) + + agents, err := FindOfflineAgents(ctx, bulker, policyID, 36*time.Hour, WithIndexName(index)) + require.NoError(t, err) + require.Len(t, agents, 2) + assert.EqualValues(t, []string{twoDayOldID, threeDayOldID}, []string{agents[0].Id, agents[1].Id}) +} diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 80811717f..7a1ed13d0 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -39,6 +39,7 @@ const ( FieldDefaultApiKey = "default_api_key" FieldDefaultApiKeyId = "default_api_key_id" FieldPolicyOutputPermissionsHash = "policy_output_permissions_hash" + FieldUnenrolledReason = "unenrolled_reason" FieldActive = "active" FieldUpdatedAt = "updated_at" diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index ee48f060f..6cca74500 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -146,6 +146,9 @@ const ( "unenrolled_at": { "type": "date" }, + "unenrolled_reason": { + "type": "keyword" + }, "unenrollment_started_at": { "type": "date" }, @@ -304,6 +307,9 @@ const ( }, "@timestamp": { "type": "date" + }, + "unenroll_timeout": { + "type": "integer" } } }` diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index ee8357fea..e93e95684 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -150,6 +150,9 @@ type Agent struct { // Date/time the Elastic Agent unenrolled UnenrolledAt string `json:"unenrolled_at,omitempty"` + // Reason the Elastic Agent was unenrolled + UnenrolledReason string `json:"unenrolled_reason,omitempty"` + // Date/time the Elastic Agent unenrolled started UnenrollmentStartedAt string `json:"unenrollment_started_at,omitempty"` @@ -281,6 +284,9 @@ type Policy struct { // Date/time the policy revision was created Timestamp string `json:"@timestamp,omitempty"` + + // Timeout (seconds) that an Elastic Agent should be un-enrolled. 
+ UnenrollTimeout int64 `json:"unenroll_timeout,omitempty"` } // PolicyLeader The current leader Fleet Server for a policy diff --git a/model/schema.json b/model/schema.json index 43a09c525..8def4eca5 100644 --- a/model/schema.json +++ b/model/schema.json @@ -293,6 +293,10 @@ "default_fleet_server": { "description": "True when this policy is the default policy to start Fleet Server", "type": "boolean" + }, + "unenroll_timeout": { + "description": "Timeout (seconds) that an Elastic Agent should be un-enrolled.", + "type": "integer" } }, "required": [ @@ -355,6 +359,11 @@ "type": "string", "format": "date-time" }, + "unenrolled_reason": { + "description": "Reason the Elastic Agent was unenrolled", + "type": "string", + "enum": ["manual", "timeout"] + }, "unenrollment_started_at": { "description": "Date/time the Elastic Agent unenrolled started", "type": "string", From a54a1f872d1aab5e70c0a618111453ce807910f7 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 24 Jun 2021 01:15:33 -0400 Subject: [PATCH 132/240] [Automation] Update elastic stack version to 7.14.0-8f9888e1 for testing (#484) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 92363d533..222883baa 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-df0371f0-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-8f9888e1-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From a4bee6c251df8dd8bfc06ce6271995b3fff140dd Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 25 Jun 2021 01:15:38 -0400 Subject: [PATCH 133/240] [Automation] Update elastic stack version to 7.14.0-25663e85 for testing (#488) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 222883baa..aa88b91fe 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-8f9888e1-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-25663e85-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 878020301e87ab49a2f30d86a587591eb0af2414 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 28 Jun 2021 01:18:22 -0400 Subject: [PATCH 134/240] [Automation] Update elastic stack version to 7.14.0-15b00b37 for testing (#493) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index aa88b91fe..abcb3b2e8 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-25663e85-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-15b00b37-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From eb0200b25b765b2683f5c158b62aa7648faa732d Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 28 Jun 2021 15:43:35 +0000 Subject: [PATCH 135/240] Short-circuit long poll if the auth/setup takes too long. 
Add service.name to logging context to ease log lookup in cloud (#496) (cherry picked from commit f70bb81c9946be2c845b9dc97db17e0dccb78beb) Co-authored-by: Sean Cunningham --- cmd/fleet/handleCheckin.go | 107 ++++++++++++++++++--------- cmd/fleet/handleStatus.go | 2 +- cmd/fleet/main.go | 5 +- cmd/fleet/metrics.go | 4 +- cmd/fleet/server_integration_test.go | 2 +- internal/pkg/logger/ecs.go | 3 + internal/pkg/logger/logger.go | 82 ++++++++++++-------- 7 files changed, 133 insertions(+), 72 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 277feeb09..56718722d 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -52,7 +52,15 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro start := time.Now() id := ps.ByName("id") - err := rt.ct._handleCheckin(w, r, id, rt.bulker) + + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). + Str("agentId", id). + Str(EcsHttpRequestId, reqId). + Logger() + + err := rt.ct._handleCheckin(zlog, w, r, id, rt.bulker) if err != nil { cntCheckin.IncError(err) @@ -128,9 +136,9 @@ func NewCheckinT( return ct } -func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { +func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { - reqId := r.Header.Get(logger.HeaderRequestID) + start := time.Now() limitF, err := ct.limit.Acquire() if err != nil { @@ -173,7 +181,7 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st cntCheckin.bodyIn.Add(readCounter.Count()) // Compare local_metadata content and update if different - rawMeta, err := parseMeta(agent, reqId, &req) + rawMeta, err := parseMeta(zlog, agent, &req) if err != nil { return err } @@ -184,14 +192,6 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st return err } - log.Debug(). - Str("agentId", id). - Str("reqId", reqId). - Str("status", req.Status). - Str("seqNo", seqno.String()). - Uint64("bodyCount", readCounter.Count()). - Msg("checkin start long poll") - // Subscribe to actions dispatcher aSub := ct.ad.Subscribe(agent.Id, seqno) defer ct.ad.Unsubscribe(aSub) @@ -208,14 +208,17 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st tick := time.NewTicker(ct.cfg.Timeouts.CheckinTimestamp) defer tick.Stop() - pollDuration := ct.cfg.Timeouts.CheckinLongPoll - if ct.cfg.Timeouts.CheckinJitter != 0 { - jitter := time.Duration(rand.Int63n(int64(ct.cfg.Timeouts.CheckinJitter))) - if jitter < pollDuration { - pollDuration = pollDuration - jitter - log.Trace().Str("agentId", id).Dur("poll", pollDuration).Msg("Long poll with jitter") - } - } + setupDuration := time.Since(start) + pollDuration, jitter := calcPollDuration(zlog, ct.cfg, setupDuration) + + zlog.Debug(). + Str("status", req.Status). + Str("seqNo", seqno.String()). + Dur("setupDuration", setupDuration). + Dur("jitter", jitter). + Dur("pollDuration", pollDuration). + Uint64("bodyCount", readCounter.Count()). + Msg("checkin start long poll") // Chill out for for a bit. Long poll. longPoll := time.NewTicker(pollDuration) @@ -249,14 +252,14 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st actions = append(actions, acs...) 
break LOOP case policy := <-sub.Output(): - actionResp, err := processPolicy(ctx, bulker, agent.Id, reqId, policy) + actionResp, err := processPolicy(ctx, zlog, bulker, agent.Id, policy) if err != nil { return errors.Wrap(err, "processPolicy") } actions = append(actions, *actionResp) break LOOP case <-longPoll.C: - log.Trace().Str(EcsHttpRequestId, reqId).Str("agentId", agent.Id).Msg("fire long poll") + zlog.Trace().Msg("fire long poll") break LOOP case <-tick.C: ct.bc.CheckIn(agent.Id, req.Status, nil, nil) @@ -270,10 +273,10 @@ func (ct *CheckinT) _handleCheckin(w http.ResponseWriter, r *http.Request, id st Actions: actions, } - return ct.writeResponse(w, r, resp) + return ct.writeResponse(zlog, w, r, resp) } -func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp CheckinResponse) error { +func (ct *CheckinT) writeResponse(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, resp CheckinResponse) error { payload, err := json.Marshal(&resp) if err != nil { @@ -304,7 +307,7 @@ func (ct *CheckinT) writeResponse(w http.ResponseWriter, r *http.Request, resp C cntCheckin.bodyOut.Add(wrCounter.Count()) - log.Trace(). + zlog.Trace(). Err(err). Int("lvl", compressionLevel). Int("srcSz", len(payload)). @@ -399,12 +402,10 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin // - Generate and update default ApiKey if roles have changed. // - Rewrite the policy for delivery to the agent injecting the key material. // -func processPolicy(ctx context.Context, bulker bulk.Bulk, agentId, reqId string, pp *policy.ParsedPolicy) (*ActionResp, error) { +func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, agentId string, pp *policy.ParsedPolicy) (*ActionResp, error) { - zlog := log.With(). + zlog = zlog.With(). Str("ctx", "processPolicy"). - Str(EcsHttpRequestId, reqId). - Str("agentId", agentId). Str("policyId", pp.Policy.PolicyId). Logger() @@ -568,7 +569,7 @@ func findAgentByApiKeyId(ctx context.Context, bulker bulk.Bulk, id string) (*mod // parseMeta compares the agent and the request local_metadata content // and returns fields to update the agent record or nil -func parseMeta(agent *model.Agent, reqId string, req *CheckinRequest) ([]byte, error) { +func parseMeta(zlog zerolog.Logger, agent *model.Agent, req *CheckinRequest) ([]byte, error) { // Quick comparison first; compare the JSON payloads. // If the data is not consistently normalized, this short-circuit will not work. @@ -599,16 +600,12 @@ func parseMeta(agent *model.Agent, reqId string, req *CheckinRequest) ([]byte, e // Compare the deserialized meta structures and return the bytes to update if different if !reflect.DeepEqual(reqLocalMeta, agentLocalMeta) { - log.Trace(). - Str("agentId", agent.Id). - Str(EcsHttpRequestId, reqId). + zlog.Trace(). RawJSON("oldLocalMeta", agent.LocalMetadata). RawJSON("newLocalMeta", req.LocalMeta). Msg("local metadata not equal") - log.Info(). - Str("agentId", agent.Id). - Str(EcsHttpRequestId, reqId). + zlog.Info(). RawJSON("req.LocalMeta", req.LocalMeta). Msg("applying new local metadata") @@ -617,3 +614,41 @@ func parseMeta(agent *model.Agent, reqId string, req *CheckinRequest) ([]byte, e return outMeta, nil } + +func calcPollDuration(zlog zerolog.Logger, cfg *config.Server, setupDuration time.Duration) (time.Duration, time.Duration) { + + pollDuration := cfg.Timeouts.CheckinLongPoll + + // Under heavy load, elastic may take along time to authorize the api key, many seconds to minutes. 
+ // Short circuit the long poll to take the setup delay into account. This is particularly necessary + // in cloud where the proxy will time us out after 5m20s causing unnecessary errors. + + if setupDuration >= pollDuration { + // We took so long to setup that we need to exit immediately + pollDuration = 0 + zlog.Warn(). + Dur("setupDuration", setupDuration). + Dur("pollDuration", cfg.Timeouts.CheckinLongPoll). + Msg("excessive setup duration short cicuit long poll") + + } else { + pollDuration -= setupDuration + if setupDuration > (time.Second * 10) { + zlog.Warn(). + Dur("setupDuration", setupDuration). + Dur("pollDuration", pollDuration). + Msg("checking poll duration decreased due to slow setup") + } + } + + var jitter time.Duration + if cfg.Timeouts.CheckinJitter != 0 { + jitter = time.Duration(rand.Int63n(int64(cfg.Timeouts.CheckinJitter))) + if jitter < pollDuration { + pollDuration = pollDuration - jitter + zlog.Trace().Dur("poll", pollDuration).Msg("Long poll with jitter") + } + } + + return pollDuration, jitter +} diff --git a/cmd/fleet/handleStatus.go b/cmd/fleet/handleStatus.go index dbd560ef4..ba809da6f 100644 --- a/cmd/fleet/handleStatus.go +++ b/cmd/fleet/handleStatus.go @@ -23,7 +23,7 @@ func (rt Router) handleStatus(w http.ResponseWriter, r *http.Request, _ httprout status := rt.sm.Status() resp := StatusResponse{ - Name: "fleet-server", + Name: kServiceName, Status: status.String(), } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 505c33727..6d6f5c00c 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -45,6 +45,7 @@ import ( ) const ( + kServiceName = "fleet-server" kAgentMode = "agent-mode" kAgentModeRestartLoopDelay = 2 * time.Second ) @@ -75,7 +76,7 @@ func makeCacheConfig(cfg *config.Config) cache.Config { } func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, error) { - l, err := logger.Init(cfg) + l, err := logger.Init(cfg, kServiceName) if err != nil { return nil, err } @@ -164,7 +165,7 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin func NewCommand(version, commit string) *cobra.Command { cmd := &cobra.Command{ - Use: "fleet-server", + Use: kServiceName, Short: "Fleet Server controls a fleet of Elastic Agents", RunE: getRunCommand(version, commit), } diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index ac046c26a..43885a678 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -38,7 +38,7 @@ func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api monitoring.NewString(registry, "version").Set(f.ver) } if registry.Get("name") == nil { - monitoring.NewString(registry, "name").Set("fleet-server") + monitoring.NewString(registry, "name").Set(kServiceName) } if !cfg.HTTP.Enabled { @@ -84,7 +84,7 @@ func (rt *routeStats) Register(registry *monitoring.Registry) { } func init() { - metrics.SetupMetrics("fleet-server") + metrics.SetupMetrics(kServiceName) registry = monitoring.Default.NewRegistry("http_server") cntHttpNew = monitoring.NewUint(registry, "tcp_open") cntHttpClose = monitoring.NewUint(registry, "tcp_close") diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 2fc7072be..6294e351a 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -62,7 +62,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { return nil, err } - logger.Init(cfg) + logger.Init(cfg, "fleet-server") port, err := ftesting.FreePort() if err != nil { diff --git 
a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go index bd8bdf32f..0c688b0f4 100644 --- a/internal/pkg/logger/ecs.go +++ b/internal/pkg/logger/ecs.go @@ -38,4 +38,7 @@ const ( // Event EcsEventDuration = "event.duration" + + // Service + EcsServiceName = "service.name" ) diff --git a/internal/pkg/logger/logger.go b/internal/pkg/logger/logger.go index e39311b26..ec73c9c2b 100644 --- a/internal/pkg/logger/logger.go +++ b/internal/pkg/logger/logger.go @@ -33,6 +33,7 @@ type WriterSync interface { type Logger struct { cfg *config.Config sync WriterSync + name string } // Reload reloads the logger configuration. @@ -42,7 +43,7 @@ func (l *Logger) Reload(_ context.Context, cfg *config.Config) error { l.Sync() // reload the logger - logger, w, err := configure(cfg) + logger, w, err := configure(cfg, l.name) if err != nil { return err } @@ -61,13 +62,13 @@ func (l *Logger) Sync() { } // Init initializes the logger. -func Init(cfg *config.Config) (*Logger, error) { +func Init(cfg *config.Config, svcName string) (*Logger, error) { var err error once.Do(func() { var l zerolog.Logger var w WriterSync - l, w, err = configure(cfg) + l, w, err = configure(cfg, svcName) if err != nil { return } @@ -76,6 +77,7 @@ func Init(cfg *config.Config) (*Logger, error) { gLogger = &Logger{ cfg: cfg, sync: w, + name: svcName, } // override the field names for ECS @@ -118,35 +120,55 @@ func level(cfg *config.Config) zerolog.Level { return cfg.Logging.LogLevel() } -func configure(cfg *config.Config) (zerolog.Logger, WriterSync, error) { - if cfg.Logging.ToStderr { - out := io.Writer(os.Stderr) - if cfg.Logging.Pretty { - out = zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05.000"} - } - return log.Output(out).Level(level(cfg)), os.Stderr, nil +func configureStderrLogger(cfg *config.Config) (zerolog.Logger, WriterSync) { + + out := io.Writer(os.Stderr) + if cfg.Logging.Pretty { + out = zerolog.ConsoleWriter{Out: os.Stderr, TimeFormat: "15:04:05.000"} } - if cfg.Logging.ToFiles { - files := cfg.Logging.Files - if files == nil { - files = &config.LoggingFiles{} - files.InitDefaults() - } - filename := filepath.Join(files.Path, files.Name) - rotator, err := file.NewFileRotator(filename, - file.MaxSizeBytes(files.MaxSize), - file.MaxBackups(files.MaxBackups), - file.Permissions(os.FileMode(files.Permissions)), - file.Interval(files.Interval), - file.RotateOnStartup(files.RotateOnStartup), - file.RedirectStderr(files.RedirectStderr), - ) - if err != nil { - return zerolog.Logger{}, nil, err - } - return log.Output(rotator).Level(level(cfg)), rotator, nil + + return log.Output(out).Level(level(cfg)), os.Stderr +} + +func configureFileRotatorLogger(cfg *config.Config) (zerolog.Logger, WriterSync, error) { + + files := cfg.Logging.Files + if files == nil { + files = &config.LoggingFiles{} + files.InitDefaults() } - return log.Output(ioutil.Discard).Level(level(cfg)), &nopSync{}, nil + filename := filepath.Join(files.Path, files.Name) + rotator, err := file.NewFileRotator(filename, + file.MaxSizeBytes(files.MaxSize), + file.MaxBackups(files.MaxBackups), + file.Permissions(os.FileMode(files.Permissions)), + file.Interval(files.Interval), + file.RotateOnStartup(files.RotateOnStartup), + file.RedirectStderr(files.RedirectStderr), + ) + if err != nil { + return zerolog.Logger{}, nil, err + } + return log.Output(rotator).Level(level(cfg)), rotator, nil +} + +func configure(cfg *config.Config, svcName string) (lg zerolog.Logger, wr WriterSync, err error) { + + switch { + case cfg.Logging.ToStderr: + lg, wr = 
configureStderrLogger(cfg) + case cfg.Logging.ToFiles: + lg, wr, err = configureFileRotatorLogger(cfg) + default: + lg = log.Output(ioutil.Discard).Level(level(cfg)) + wr = &nopSync{} + } + + if svcName != "" { + lg = lg.With().Str(EcsServiceName, svcName).Logger() + } + + return } type nopSync struct { From 3baf0119da12be6be80f02e49c4e62fd01dc9694 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 29 Jun 2021 01:15:52 -0400 Subject: [PATCH 136/240] [Automation] Update elastic stack version to 7.14.0-b3f1839d for testing (#498) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index abcb3b2e8..c5bf36266 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-15b00b37-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-b3f1839d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 76523021759a09c3402c1c6ede497dd29a0845e2 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 29 Jun 2021 12:23:49 -0400 Subject: [PATCH 137/240] [Automation] Update elastic stack version to 7.14.0-c5e16e4e for testing (#501) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index c5bf36266..300d30317 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-b3f1839d-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-c5e16e4e-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 3b16b5d75ad9edda37a8c035c9001f0b7a53fd60 Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 30 Jun 2021 15:28:05 -0400 Subject: [PATCH 138/240] Bump 7.x to 7.15. 
(#506) --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 2f9973901..519d01a9a 100644 --- a/main.go +++ b/main.go @@ -16,7 +16,7 @@ import ( "github.com/elastic/fleet-server/v7/cmd/fleet" ) -const defaultVersion = "7.14.0" +const defaultVersion = "7.15.0" var ( Version string = defaultVersion From c95e63dc8326f35964e1288941fa981fb203bdd5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 1 Jul 2021 01:16:20 -0400 Subject: [PATCH 139/240] [Automation] Update elastic stack version to 7.14.0-45f71c2c for testing (#508) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 300d30317..2424f3d2f 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-c5e16e4e-SNAPSHOT +ELASTICSEARCH_VERSION=7.14.0-45f71c2c-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 982aaa83dd61534628e671010b43db576995cd0b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 1 Jul 2021 15:52:53 +0000 Subject: [PATCH 140/240] [7.x](backport #491) Streamline proxy settings with Beats/Agent (#510) * Add proxy_headers (cherry picked from commit 649e113619339d9d5521338d187949d27ba7bd58) * support proxy environment variables (cherry picked from commit 26e0d616bd872bd02c7a187f4c43ee3777c8f358) * Add tests (cherry picked from commit abf7f77dd7f4b8c342b99037d5576aa3f506d61a) Co-authored-by: urso --- internal/pkg/config/output.go | 25 +++++-- internal/pkg/config/output_test.go | 104 +++++++++++++++++++++++++++++ 2 files changed, 124 insertions(+), 5 deletions(-) diff --git a/internal/pkg/config/output.go b/internal/pkg/config/output.go index d7a566bd9..7de261a9a 100644 --- a/internal/pkg/config/output.go +++ b/internal/pkg/config/output.go @@ -38,6 +38,7 @@ type Elasticsearch struct { ServiceToken string `config:"service_token"` ProxyURL string `config:"proxy_url"` ProxyDisable bool `config:"proxy_disable"` + ProxyHeaders map[string]string `config:"proxy_headers"` TLS *tlscommon.Config `config:"ssl"` MaxRetries int `config:"max_retries"` MaxConnPerHost int `config:"max_conn_per_host"` @@ -118,12 +119,26 @@ func (c *Elasticsearch) ToESConfig(longPoll bool) (elasticsearch.Config, error) } httpTransport.TLSClientConfig = tls.ToConfig() } - if c.ProxyURL != "" && !c.ProxyDisable { - proxyUrl, err := common.ParseURL(c.ProxyURL) - if err != nil { - return elasticsearch.Config{}, err + + if !c.ProxyDisable { + if c.ProxyURL != "" { + proxyUrl, err := common.ParseURL(c.ProxyURL) + if err != nil { + return elasticsearch.Config{}, err + } + httpTransport.Proxy = http.ProxyURL(proxyUrl) + } else { + httpTransport.Proxy = http.ProxyFromEnvironment + } + + var proxyHeaders http.Header + if len(c.ProxyHeaders) > 0 { + proxyHeaders = make(http.Header, len(c.ProxyHeaders)) + for k, v := range c.ProxyHeaders { + proxyHeaders.Add(k, v) + } } - httpTransport.Proxy = http.ProxyURL(proxyUrl) + httpTransport.ProxyConnectHeader = proxyHeaders } h := http.Header{} diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index ebbed6281..013edc01a 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -9,6 +9,7 @@ package config import ( "crypto/tls" 
"net/http" + "os" "testing" "time" @@ -169,6 +170,10 @@ func TestToESConfig(t *testing.T) { t.Run(name, func(t *testing.T) { res, err := test.cfg.ToESConfig(false) require.NoError(t, err) + + // cmp.Diff can't handle function pointers. + res.Transport.(*http.Transport).Proxy = nil + test.result.Header.Set("X-elastic-product-origin", "fleet") if !assert.True(t, cmp.Equal(test.result, res, copts...)) { diff := cmp.Diff(test.result, res, copts...) @@ -179,3 +184,102 @@ func TestToESConfig(t *testing.T) { }) } } + +func TestESProxyConfig(t *testing.T) { + testcases := map[string]struct { + cfg Elasticsearch + url string + want string + headers map[string]string + env map[string]string + }{ + "no proxy": { + cfg: Elasticsearch{ProxyDisable: true}, + }, + "proxy url set": { + cfg: Elasticsearch{ + ProxyURL: "http://proxy.com", + }, + url: "http://test.com", + want: "http://proxy.com", + }, + "with headers": { + cfg: Elasticsearch{ + ProxyURL: "http://proxy.com", + ProxyHeaders: map[string]string{ + "TestProxyHeader": "Custom Value", + }, + }, + url: "http://test.com", + want: "http://proxy.com", + headers: map[string]string{ + "TestProxyHeader": "Custom Value", + }, + }, + "proxy from env by default": { + cfg: Elasticsearch{}, + url: "http://test.com", + want: "http://proxy.com", + env: map[string]string{ + "HTTP_PROXY": "http://proxy.com", + }, + }, + } + + for name, test := range testcases { + t.Run(name, func(t *testing.T) { + setTestEnv(t, test.env) + + res, err := test.cfg.ToESConfig(false) + require.NoError(t, err) + + transport := res.Transport.(*http.Transport) + if test.want == "" { + require.Nil(t, transport.Proxy) + return + } + require.NotNil(t, transport.Proxy) + + req, err := http.NewRequest("GET", test.url, nil) + require.NoError(t, err) + + got, err := transport.Proxy(req) + require.NoError(t, err) + + if len(test.headers) == 0 { + require.Len(t, transport.ProxyConnectHeader, 0) + } else { + headers := http.Header{} + for k, v := range test.headers { + headers.Add(k, v) + } + require.Equal(t, headers, transport.ProxyConnectHeader) + } + + require.Equal(t, test.want, got.String()) + }) + } +} + +func setTestEnv(t *testing.T, env map[string]string) { + var oldEnv map[string]string + for k := range env { + if v := os.Getenv(k); v != "" { + oldEnv[k] = v + } + } + + t.Cleanup(func() { + for k := range env { + if v := oldEnv[k]; v != v { + os.Setenv(k, v) + } else { + os.Unsetenv(k) + } + } + }) + + for k, v := range env { + os.Setenv(k, v) + } +} From c4020a4164f05e38c8871dc63d03ba129095a3ce Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 2 Jul 2021 02:12:37 +0000 Subject: [PATCH 141/240] Enable http2 in Fleet Server. (#511) (#512) (cherry picked from commit 8f67e5a1f78b9f9ee305062576ebf77b95e6ca0e) Co-authored-by: Sean Cunningham --- cmd/fleet/server.go | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index bc2ee3de1..22a596f42 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -88,20 +88,33 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve return err } - defer ln.Close() + // Bind the deferred Close() to the stack variable to handle case where 'ln' is wrapped + defer func() { ln.Close() }() + + // Conn Limiter must be before the TLS handshake in the stack; + // The server should not eat the cost of the handshake if there + // is no capacity to service the connection. 
+ // Also, it appears the HTTP2 implementation depends on the tls.Listener + // being at the top of the stack. + ln = wrapConnLimitter(ctx, ln, cfg) if cfg.TLS != nil && cfg.TLS.IsEnabled() { - tlsCfg, err := tlscommon.LoadTLSConfig(cfg.TLS) + commonTlsCfg, err := tlscommon.LoadTLSConfig(cfg.TLS) if err != nil { return err } - server.TLSConfig = tlsCfg.ToConfig() + server.TLSConfig = commonTlsCfg.ToConfig() + + // Must enable http/2 in the configuration explicitly. + // (see https://golang.org/pkg/net/http/#Server.Serve) + server.TLSConfig.NextProtos = []string{"h2", "http/1.1"} + ln = tls.NewListener(ln, server.TLSConfig) + } else { log.Warn().Msg("exposed over insecure HTTP; enablement of TLS is strongly recommended") } - ln = wrapConnLimitter(ctx, ln, cfg) if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { return err } From 1cb3251b500ed14ac3504fdd72578bfc9a4b1348 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Jul 2021 16:48:49 +0000 Subject: [PATCH 142/240] Policy changes can be dropped during policy rollouts to large number of agents. Aggregates changes to avoid missing updates. (#525) (cherry picked from commit 1fc4b1f758c68858071568267f567c952ffe189e) Co-authored-by: Sean Cunningham --- cmd/fleet/server.go | 3 +- internal/pkg/monitor/subscription_monitor.go | 9 ++-- internal/pkg/policy/monitor.go | 44 +++++++++++++++++++- 3 files changed, 49 insertions(+), 7 deletions(-) diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 22a596f42..bb9960889 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -12,6 +12,7 @@ import ( "net/http" "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/julienschmidt/httprouter" @@ -142,7 +143,7 @@ type stubLogger struct { } func (s *stubLogger) Write(p []byte) (n int, err error) { - log.Error().Bytes("msg", p).Send() + log.Error().Bytes(logger.EcsMessage, p).Send() return len(p), nil } diff --git a/internal/pkg/monitor/subscription_monitor.go b/internal/pkg/monitor/subscription_monitor.go index ec5c71821..02e43c4d8 100644 --- a/internal/pkg/monitor/subscription_monitor.go +++ b/internal/pkg/monitor/subscription_monitor.go @@ -145,10 +145,11 @@ func (m *monitorT) notify(ctx context.Context, hits []es.HitT) { select { case s.c <- hits: case <-lc.Done(): - err := ctx.Err() - if err == context.DeadlineExceeded { - log.Err(err).Str("ctx", "subscription monitor").Dur("timeout", m.subTimeout).Msg("dropped notification") - } + log.Error(). + Err(lc.Err()). + Str("ctx", "subscription monitor"). + Dur("timeout", m.subTimeout). + Msg("dropped notification") } }(s) } diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 49befe528..598acc849 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -101,6 +101,13 @@ func (m *monitorT) Run(ctx context.Context) error { s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) + // The output from m.monitor times out if we don't pull data off quickly enough. + // Rollout can take a while; append upates here until we can attend to it. + // TODO: This is a workaround for 7.14. Rollout strategy will be reconsidered for 7.15. 
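The workaround this TODO describes rests on a small select idiom: a send on a nil channel can never proceed, so the forwarding case is enabled only while something is buffered. monitorOutputChannel below applies it to slices of es.HitT; the following stripped-down sketch shows the same pattern over string slices (the element type and the name drainAndForward are chosen for illustration, they are not part of the patch).

package sketch

import "context"

// drainAndForward never blocks the producer writing to in: incoming batches
// are buffered locally, and the "out <- batch" case is only selectable while
// the buffer is non-empty, because out stays nil otherwise.
func drainAndForward(ctx context.Context, in <-chan []string) <-chan []string {
    outCh := make(chan []string)

    go func() {
        var batch []string
        var out chan []string // nil disables the send case below

        for {
            select {
            case <-ctx.Done():
                return
            case items := <-in:
                batch = append(batch, items...)
                out = outCh // something to deliver: enable the send case
            case out <- batch:
                batch = nil // handed off: disable sends until more data arrives
                out = nil
            }
        }
    }()

    return outCh
}
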
+ subCtx, cfunc := context.WithCancel(ctx) + defer cfunc() + outputCh := m.monitorOutputChannel(subCtx, s.Output()) + close(m.startCh) LOOP: for { @@ -111,7 +118,7 @@ LOOP: if err := m.process(ctx); err != nil { return err } - case hits := <-s.Output(): + case hits := <-outputCh: policies := make([]model.Policy, len(hits)) for i, hit := range hits { err := hit.Unmarshal(&policies[i]) @@ -128,6 +135,35 @@ LOOP: return nil } +// Aggegates changes from the output channel until the main loop can process. +func (m *monitorT) monitorOutputChannel(ctx context.Context, outputCh <-chan []es.HitT) chan []es.HitT { + localOutputCh := make(chan []es.HitT) + + go func() { + + var hits []es.HitT + var outCh chan []es.HitT + + for { + select { + case <-ctx.Done(): + m.log.Info().Msg("Exit policy monitor local") + return + case nHits := <-outputCh: + m.log.Info().Int("nHits", len(nHits)).Msg("Received hits on local monitor") + hits = append(hits, nHits...) + outCh = localOutputCh + case outCh <- hits: + m.log.Info().Int("nHits", len(hits)).Msg("Hits dispatched to main loop") + outCh = nil + hits = nil + } + } + }() + + return localOutputCh +} + func (m *monitorT) waitStart(ctx context.Context) (err error) { select { case <-ctx.Done(): @@ -189,7 +225,11 @@ func (m *monitorT) groupByLatest(policies []model.Policy) map[string]model.Polic } func (m *monitorT) rollout(ctx context.Context, policy model.Policy) error { - zlog := m.log.With().Str("policyId", policy.PolicyId).Logger() + zlog := m.log.With(). + Str("policyId", policy.PolicyId). + Int64("revisionIdx", policy.RevisionIdx). + Int64("coordinatorIdx", policy.CoordinatorIdx). + Logger() pp, err := NewParsedPolicy(policy) if err != nil { From 310c31bc50af81bf0956bac67dc43b033aa901cf Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 7 Jul 2021 21:21:40 +0000 Subject: [PATCH 143/240] Modify hard connection limiter to Close connection on max instead of block. (#528) (cherry picked from commit de8580ff2012372baf70545e4e4bbe9d8ca9b7c7) Co-authored-by: Sean Cunningham --- NOTICE.txt | 74 +++++++++++++------------- cmd/fleet/server.go | 4 +- go.mod | 1 - internal/pkg/limit/listener.go | 94 ++++++++++++++++++++++++++++++++++ internal/pkg/logger/ecs.go | 3 ++ 5 files changed, 136 insertions(+), 40 deletions(-) create mode 100644 internal/pkg/limit/listener.go diff --git a/NOTICE.txt b/NOTICE.txt index 88b483a8a..101441a89 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2504,43 +2504,6 @@ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. --------------------------------------------------------------------------------- -Dependency : golang.org/x/net -Version: v0.0.0-20200822124328-c89045814202 -Licence type (autodetected): BSD-3-Clause --------------------------------------------------------------------------------- - -Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: - -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. 
- * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - -------------------------------------------------------------------------------- Dependency : golang.org/x/sync Version: v0.0.0-20200625203802-6e8e738ad208 @@ -35515,6 +35478,43 @@ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +-------------------------------------------------------------------------------- +Dependency : golang.org/x/net +Version: v0.0.0-20200822124328-c89045814202 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/golang.org/x/net@v0.0.0-20200822124328-c89045814202/LICENSE: + +Copyright (c) 2009 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + -------------------------------------------------------------------------------- Dependency : golang.org/x/oauth2 Version: v0.0.0-20200107190931-bf48bf16ab8d diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index bb9960889..025892acd 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -12,12 +12,12 @@ import ( "net/http" "github.com/elastic/fleet-server/v7/internal/pkg/config" + "github.com/elastic/fleet-server/v7/internal/pkg/limit" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/beats/v7/libbeat/common/transport/tlscommon" "github.com/julienschmidt/httprouter" "github.com/rs/zerolog/log" - "golang.org/x/net/netutil" ) func diagConn(c net.Conn, s http.ConnState) { @@ -131,7 +131,7 @@ func wrapConnLimitter(ctx context.Context, ln net.Listener, cfg *config.Server) Int("hardConnLimit", hardLimit). Msg("server hard connection limiter installed") - ln = netutil.LimitListener(ln, hardLimit) + ln = limit.Listener(ln, hardLimit) } else { log.Info().Msg("server hard connection limiter disabled") } diff --git a/go.mod b/go.mod index c5c0c899b..7b12f8f3f 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,6 @@ require ( github.com/spf13/cobra v0.0.5 github.com/stretchr/testify v1.6.1 go.uber.org/zap v1.14.0 - golang.org/x/net v0.0.0-20200822124328-c89045814202 golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e ) diff --git a/internal/pkg/limit/listener.go b/internal/pkg/limit/listener.go new file mode 100644 index 000000000..bd5fe987f --- /dev/null +++ b/internal/pkg/limit/listener.go @@ -0,0 +1,94 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package limit + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/logger" + + "github.com/rs/zerolog/log" + "net" + "sync" +) + +// Derived from netutil.LimitListener but works slightly differently. +// Instead of blocking on the semaphore before acception connection, +// this implementation immediately accepts connections and if cannot +// acquire the semaphore it forces the connection closed. +// Ideally, this limiter is run *before* the TLS handshake occurs +// to prevent DDOS attack that eats all the server's CPU. +// The downside to this is that it will Close() valid connections +// indiscriminately. 
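The wrapper defined next is consumed the same way netutil.LimitListener was: wrap the accepted TCP listener before the TLS listener and the HTTP server see it. A hedged usage sketch follows; the address, the limit of 4096, and the name serveWithHardLimit are illustrative values, not fleet-server defaults.

package sketch

import (
    "net"
    "net/http"

    "github.com/elastic/fleet-server/v7/internal/pkg/limit"
)

// serveWithHardLimit wraps a plain TCP listener with the hard limiter before
// handing it to the HTTP server. Connections over the cap are accepted and
// closed immediately rather than queued, so an overload cannot stall the
// accept loop or spend CPU on handshakes that cannot be serviced.
func serveWithHardLimit(addr string, srv *http.Server) error {
    ln, err := net.Listen("tcp", addr)
    if err != nil {
        return err
    }
    defer ln.Close()

    ln = limit.Listener(ln, 4096)

    return srv.Serve(ln)
}
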
+ +func Listener(l net.Listener, n int) net.Listener { + return &limitListener{ + Listener: l, + sem: make(chan struct{}, n), + done: make(chan struct{}), + } +} + +type limitListener struct { + net.Listener + sem chan struct{} + closeOnce sync.Once // ensures the done chan is only closed once + done chan struct{} // no values sent; closed when Close is called +} + +func (l *limitListener) acquire() bool { + select { + case <-l.done: + return false + case l.sem <- struct{}{}: + return true + default: + return false + } +} +func (l *limitListener) release() { <-l.sem } + +func (l *limitListener) Accept() (net.Conn, error) { + + // Accept the connection irregardless + c, err := l.Listener.Accept() + if err != nil { + return nil, err + } + + // If we cannot acquire the semaphore, close the connection + if acquired := l.acquire(); !acquired { + zlog := log.Warn() + + var err error + if c != nil { + err = c.Close() + zlog.Str(logger.EcsServerAddress, c.LocalAddr().String()) + zlog.Str(logger.EcsClientAddress, c.RemoteAddr().String()) + zlog.Err(err) + } + zlog.Int("max", cap(l.sem)).Msg("Connection closed due to max limit") + + return c, nil + } + + return &limitListenerConn{Conn: c, release: l.release}, nil +} + +func (l *limitListener) Close() error { + err := l.Listener.Close() + l.closeOnce.Do(func() { close(l.done) }) + return err +} + +type limitListenerConn struct { + net.Conn + releaseOnce sync.Once + release func() +} + +func (l *limitListenerConn) Close() error { + err := l.Conn.Close() + l.releaseOnce.Do(l.release) + return err +} diff --git a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go index 0c688b0f4..f52172e24 100644 --- a/internal/pkg/logger/ecs.go +++ b/internal/pkg/logger/ecs.go @@ -33,6 +33,9 @@ const ( EcsClientIp = "client.ip" EcsClientPort = "client.port" + // Server + EcsServerAddress = "server.address" + // TLS EcsTlsEstablished = "tls.established" From 03ef494f5d320ca6594b02faca76b2eb82be3748 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 15 Jul 2021 01:15:36 -0400 Subject: [PATCH 144/240] [Automation] Update elastic stack version to 7.15.0-588e7872 for testing (#557) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 2424f3d2f..066425a77 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.14.0-45f71c2c-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-588e7872-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 2522efe05ac0e947c454b9774973bde60affd3d6 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 16 Jul 2021 01:15:07 -0400 Subject: [PATCH 145/240] [Automation] Update elastic stack version to 7.15.0-f74484b5 for testing (#561) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 066425a77..039c8724e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-588e7872-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-f74484b5-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 
892fe793386380c7a1d18a31d78d4eb36caadb94 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Sat, 17 Jul 2021 13:55:18 +0000 Subject: [PATCH 146/240] refactor rollout to be more deterministic (#564) (cherry picked from commit 2b01b56cb1b7b155452d432487a85d1d67528670) Co-authored-by: Sean Cunningham --- internal/pkg/policy/monitor.go | 452 +++++++++++++++++--------------- internal/pkg/policy/sub.go | 117 +++++++++ internal/pkg/policy/sub_test.go | 241 +++++++++++++++++ 3 files changed, 601 insertions(+), 209 deletions(-) create mode 100644 internal/pkg/policy/sub.go create mode 100644 internal/pkg/policy/sub_test.go diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 598acc849..0dfd9076c 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -7,9 +7,7 @@ package policy import ( "context" "errors" - "fmt" "sync" - "sync/atomic" "time" "github.com/rs/zerolog" @@ -22,7 +20,30 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/monitor" ) -var gCounter uint64 +const cloudPolicyId = "policy-elastic-agent-on-cloud" + +/* +Design should have the following properites + +Policy rollout scheduling should... +1) be fair; delivered in first come first server order. +2) be throttled to avoid uncontrolled impact on resources, particularly CPU. +3) adapt to subscribers that drop offline. +4) attempt to deliver the latest policy to each subscriber at the time of delivery. +5) prioritize delivery to agents that supervise fleet-servers. + +This implementation addresses the above issues by queuing subscription requests per +policy, and moving requests to the pending queue when the requirement is met; ie. +the policy is updateable. + +If the subscription is unsubscribed (ie. the agent drops offline), this implementation +will remove the subscription request from its current location in either the waiting +queue on the policy or the pending queue. + +Ordering is achieved with a simple double linked list implementation that allows object +migration across queues, and O(1) unlink without knowledge about which queue the subscription +is in. +*/ type Subscription interface { // Output returns a new policy that needs to be sent based on the current subscription. @@ -42,19 +63,9 @@ type Monitor interface { type policyFetcher func(ctx context.Context, bulker bulk.Bulk, opt ...dl.Option) ([]model.Policy, error) -type subT struct { - idx uint64 - - policyId string - revIdx int64 - coordIdx int64 - - c chan *ParsedPolicy -} - type policyT struct { pp ParsedPolicy - subs map[uint64]subT // map sub counter to channel + head *subT } type monitorT struct { @@ -65,7 +76,10 @@ type monitorT struct { monitor monitor.Monitor kickCh chan struct{} + deployCh chan struct{} + policies map[string]policyT + pendingQ *subT policyF policyFetcher policiesIndex string @@ -74,11 +88,6 @@ type monitorT struct { startCh chan struct{} } -// Output returns a new policy that needs to be sent based on the current subscription. -func (s *subT) Output() <-chan *ParsedPolicy { - return s.c -} - // NewMonitor creates the policy monitor for subscribing agents. 
func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duration) Monitor { return &monitorT{ @@ -86,7 +95,9 @@ func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duratio bulker: bulker, monitor: monitor, kickCh: make(chan struct{}, 1), + deployCh: make(chan struct{}, 1), policies: make(map[string]policyT), + pendingQ: makeHead(), throttle: throttle, policyF: dl.QueryLatestPolicies, policiesIndex: dl.FleetPolicies, @@ -96,72 +107,90 @@ func NewMonitor(bulker bulk.Bulk, monitor monitor.Monitor, throttle time.Duratio // Run runs the monitor. func (m *monitorT) Run(ctx context.Context) error { - m.log.Info().Dur("throttle", m.throttle).Msg("run policy monitor") + m.log.Info(). + Dur("throttle", m.throttle). + Msg("run policy monitor") s := m.monitor.Subscribe() defer m.monitor.Unsubscribe(s) - // The output from m.monitor times out if we don't pull data off quickly enough. - // Rollout can take a while; append upates here until we can attend to it. - // TODO: This is a workaround for 7.14. Rollout strategy will be reconsidered for 7.15. - subCtx, cfunc := context.WithCancel(ctx) - defer cfunc() - outputCh := m.monitorOutputChannel(subCtx, s.Output()) + // If no throttle set, setup a minimal spin rate. + dur := m.throttle + if dur == 0 { + dur = time.Nanosecond + } + + isDeploying := true + ticker := time.NewTicker(dur) + + startDeploy := func() { + if !isDeploying { + isDeploying = true + ticker = time.NewTicker(dur) + } + } + + stopDeploy := func() { + ticker.Stop() + isDeploying = false + } + + // begin in stopped state + stopDeploy() + + // stop timer on exit + defer stopDeploy() close(m.startCh) + LOOP: for { select { - case <-ctx.Done(): - break LOOP case <-m.kickCh: - if err := m.process(ctx); err != nil { + if err := m.loadPolicies(ctx); err != nil { return err } - case hits := <-outputCh: - policies := make([]model.Policy, len(hits)) - for i, hit := range hits { - err := hit.Unmarshal(&policies[i]) - if err != nil { - return err - } - } - if err := m.processPolicies(ctx, policies); err != nil { + startDeploy() + case <-m.deployCh: + startDeploy() + case hits := <-s.Output(): + if err := m.processHits(ctx, hits); err != nil { return err } + startDeploy() + case <-ticker.C: + if done := m.dispatchPending(); done { + stopDeploy() + } + case <-ctx.Done(): + break LOOP } } return nil } -// Aggegates changes from the output channel until the main loop can process. -func (m *monitorT) monitorOutputChannel(ctx context.Context, outputCh <-chan []es.HitT) chan []es.HitT { - localOutputCh := make(chan []es.HitT) - - go func() { - - var hits []es.HitT - var outCh chan []es.HitT - - for { - select { - case <-ctx.Done(): - m.log.Info().Msg("Exit policy monitor local") - return - case nHits := <-outputCh: - m.log.Info().Int("nHits", len(nHits)).Msg("Received hits on local monitor") - hits = append(hits, nHits...) 
- outCh = localOutputCh - case outCh <- hits: - m.log.Info().Int("nHits", len(hits)).Msg("Hits dispatched to main loop") - outCh = nil - hits = nil - } +func unmarshalHits(hits []es.HitT) ([]model.Policy, error) { + + policies := make([]model.Policy, len(hits)) + for i, hit := range hits { + err := hit.Unmarshal(&policies[i]) + if err != nil { + return nil, err } - }() + } + + return policies, nil +} + +func (m *monitorT) processHits(ctx context.Context, hits []es.HitT) error { + policies, err := unmarshalHits(hits) + if err != nil { + m.log.Error().Err(err).Msg("fail unmarshal hits") + return err + } - return localOutputCh + return m.processPolicies(ctx, policies) } func (m *monitorT) waitStart(ctx context.Context) (err error) { @@ -173,11 +202,53 @@ func (m *monitorT) waitStart(ctx context.Context) (err error) { return } -func (m *monitorT) process(ctx context.Context) error { +func (m *monitorT) dispatchPending() bool { + m.mut.Lock() + defer m.mut.Unlock() + + s := m.pendingQ.popFront() + if s == nil { + return true + } + + done := m.pendingQ.isEmpty() + + // Lookup the latest policy for this subscription + policy, ok := m.policies[s.policyId] + if !ok { + m.log.Warn(). + Str(dl.FieldPolicyId, s.policyId). + Msg("logic error: policy missing on dispatch") + return done + } + + select { + case s.ch <- &policy.pp: + m.log.Debug(). + Str("agent_id", s.agentId). + Str(dl.FieldPolicyId, s.policyId). + Int64("rev", s.revIdx). + Int64("coord", s.coordIdx). + Msg("dispatch") + default: + // Should never block on a channel; we created a channel of size one. + // A block here indicates a logic error somewheres. + m.log.Error(). + Str(dl.FieldPolicyId, s.policyId). + Str("agent_id", s.agentId). + Msg("logic error: should never block on policy channel") + } + + return done +} + +func (m *monitorT) loadPolicies(ctx context.Context) error { policies, err := m.policyF(ctx, m.bulker, dl.WithIndexName(m.policiesIndex)) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { - m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) + m.log.Debug(). + Str("index", m.policiesIndex). + Msg(es.ErrIndexNotFound.Error()) return nil } return err @@ -191,17 +262,17 @@ func (m *monitorT) process(ctx context.Context) error { func (m *monitorT) processPolicies(ctx context.Context, policies []model.Policy) error { if len(policies) == 0 { - // nothing to do return nil } + latest := m.groupByLatest(policies) for _, policy := range latest { - if err := m.rollout(ctx, policy); err != nil { - if err == context.Canceled { - return err - } - return fmt.Errorf("failed rolling out policy %s: %w", policy.PolicyId, err) + pp, err := NewParsedPolicy(policy) + if err != nil { + return err } + + m.updatePolicy(pp) } return nil } @@ -224,131 +295,93 @@ func (m *monitorT) groupByLatest(policies []model.Policy) map[string]model.Polic return latest } -func (m *monitorT) rollout(ctx context.Context, policy model.Policy) error { +func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { + newPolicy := pp.Policy + zlog := m.log.With(). - Str("policyId", policy.PolicyId). - Int64("revisionIdx", policy.RevisionIdx). - Int64("coordinatorIdx", policy.CoordinatorIdx). + Str(dl.FieldPolicyId, newPolicy.PolicyId). + Int64("rev", newPolicy.RevisionIdx). + Int64("coord", newPolicy.CoordinatorIdx). 
Logger() - pp, err := NewParsedPolicy(policy) - if err != nil { - return err - } - - subs := m.updatePolicy(pp) - if subs == nil { - return nil - } - if len(subs) == 0 { - zlog.Info().Msg("no pending subscriptions to revised policy") - return nil - } - - // Not holding the mutex, however, we are blocking the main processing loop. - // No more lookups will occur will this is rolling out. - // This is by design; there is an optional throttle here. The queue will roll - // out before any new revisions are detected and will slow based on throttle. - // Note: We may want a more sophisticated system that detects new revisions during - // a throttled rollout; but that is TBD. - - var throttle *time.Ticker - if m.throttle != time.Duration(0) { - throttle = time.NewTicker(m.throttle) - defer throttle.Stop() - } - - start := time.Now() - - zlog.Info(). - Int("nSubs", len(subs)). - Dur("throttle", m.throttle). - Msg("policy rollout begin") - -LOOP: - for _, s := range subs { - - if throttle != nil { - select { - case <-throttle.C: - case <-ctx.Done(): - err = ctx.Err() - break LOOP - } - } - - select { - case s.c <- pp: - default: - // Should never block on a channel; we created a channel of size one. - // A block here indicates a logic error somewheres. - zlog.Error(). - Str("policyId", policy.PolicyId). - Msg("should never block on policy channel") - } - + if newPolicy.CoordinatorIdx <= 0 { + zlog.Info().Msg("ignore policy that has not pass through coordinator") + return false } - zlog.Info(). - Err(err). - Dur("tdiff", time.Since(start)). - Msg("policy rollout end") - - return err -} - -func (m *monitorT) updatePolicy(pp *ParsedPolicy) []subT { m.mut.Lock() defer m.mut.Unlock() - newPolicy := pp.Policy - p, ok := m.policies[newPolicy.PolicyId] if !ok { p = policyT{ pp: *pp, - subs: make(map[uint64]subT), + head: makeHead(), } m.policies[newPolicy.PolicyId] = p - m.log.Info(). - Str("policyId", newPolicy.PolicyId). - Int64("rev", newPolicy.RevisionIdx). - Int64("coord", newPolicy.CoordinatorIdx). - Msg("new policy") - return nil + zlog.Info().Msg("new policy added on update") + return false } + // Cache the old stored policy for logging oldPolicy := p.pp.Policy + // Update the policy in our data structure p.pp = *pp m.policies[newPolicy.PolicyId] = p - m.log.Info(). - Str("policyId", newPolicy.PolicyId). - Int64("orev", oldPolicy.RevisionIdx). - Int64("nrev", newPolicy.RevisionIdx). - Int64("ocoord", oldPolicy.CoordinatorIdx). - Int64("ncoord", newPolicy.CoordinatorIdx). - Msg("policy revised") + // Iterate through the subscriptions on this policy; + // schedule any subscription for delivery that requires an update. + nQueued := 0 - if newPolicy.CoordinatorIdx <= 0 { - m.log.Info(). - Str("policyId", newPolicy.PolicyId). - Msg("Do not roll out policy that has not pass through coordinator") - return nil - } + iter := NewIterator(p.head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.isUpdate(&newPolicy) { + + // Unlink the target node from the list + iter.Unlink() + + // Push the node onto the pendingQ + // HACK: if update is for cloud agent, put on front of queue + // not at the end for immediate delivery. + if newPolicy.PolicyId == cloudPolicyId { + m.pendingQ.pushFront(sub) + } else { + m.pendingQ.pushBack(sub) + } + + zlog.Debug(). + Str("agent_id", sub.agentId). 
+ Msg("scheduled pendingQ on policy revision") - subs := make([]subT, 0, len(p.subs)) - for idx, sub := range p.subs { - if newPolicy.RevisionIdx > sub.revIdx || - (newPolicy.RevisionIdx == sub.revIdx && newPolicy.CoordinatorIdx > sub.coordIdx) { - // These subscriptions are one shot; delete from map. - delete(p.subs, idx) - subs = append(subs, sub) + nQueued += 1 } } - return subs + zlog.Info(). + Int64("oldRev", oldPolicy.RevisionIdx). + Int64("oldCoord", oldPolicy.CoordinatorIdx). + Int("nQueued", nQueued). + Msg("revised policy") + + return true +} + +func (m *monitorT) kickLoad() { + + select { + case m.kickCh <- struct{}{}: + default: + m.log.Debug().Msg("kick channel full") + } +} + +func (m *monitorT) kickDeploy() { + + select { + case m.deployCh <- struct{}{}: + default: + } } // Subscribe creates a new subscription for a policy update. @@ -361,48 +394,47 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, } m.log.Debug(). - Str("agentId", agentId). - Str("policyId", policyId). - Int64("revno", revisionIdx). - Int64("coordno", coordinatorIdx). + Str("agent_id", agentId). + Str(dl.FieldPolicyId, policyId). + Int64("rev", revisionIdx). + Int64("coord", coordinatorIdx). Msg("subscribed to policy monitor") - idx := atomic.AddUint64(&gCounter, 1) - - s := subT{ - idx: idx, - policyId: policyId, - revIdx: revisionIdx, - coordIdx: coordinatorIdx, - c: make(chan *ParsedPolicy, 1), - } + s := NewSub( + policyId, + agentId, + revisionIdx, + coordinatorIdx, + ) m.mut.Lock() + defer m.mut.Unlock() p, ok := m.policies[policyId] - pRevIdx := p.pp.Policy.RevisionIdx - pCoordIdx := p.pp.Policy.CoordinatorIdx - - if (pRevIdx > revisionIdx && pCoordIdx > 0) || - (pRevIdx == revisionIdx && pCoordIdx > coordinatorIdx) { - // fill the channel, clear out id; no point putting it in map as it is already fired - s.idx = 0 - s.c <- &p.pp - } else { - if !ok { - p = policyT{subs: make(map[uint64]subT)} - m.policies[policyId] = p - select { - case m.kickCh <- struct{}{}: - default: - m.log.Debug().Msg("kick channel full") - } + switch { + case !ok: + // We've not seen this policy before, force load. + m.log.Info(). + Str(dl.FieldPolicyId, policyId). + Msg("force load on unknown policyId") + p = policyT{head: makeHead()} + p.head.pushBack(s) + m.policies[policyId] = p + m.kickLoad() + case s.isUpdate(&p.pp.Policy): + empty := m.pendingQ.isEmpty() + m.pendingQ.pushBack(s) + m.log.Debug(). + Str("agent_id", s.agentId). + Msg("scheduled pending on subscribe") + if empty { + m.kickDeploy() } - p.subs[idx] = s + default: + p.head.pushBack(s) } - m.mut.Unlock() - return &s, nil + return s, nil } // Unsubscribe removes the current subscription. @@ -411,15 +443,17 @@ func (m *monitorT) Unsubscribe(sub Subscription) error { if !ok { return errors.New("not a subscription returned from this monitor") } - if s.idx == 0 { - return nil - } m.mut.Lock() - if policy, ok := m.policies[s.policyId]; ok { - delete(policy.subs, s.idx) - } + s.unlink() m.mut.Unlock() + m.log.Debug(). + Str("agent_id", s.agentId). + Str(dl.FieldPolicyId, s.policyId). + Int64("rev", s.revIdx). + Int64("coord", s.coordIdx). + Msg("unsubscribe") + return nil } diff --git a/internal/pkg/policy/sub.go b/internal/pkg/policy/sub.go new file mode 100644 index 000000000..65f37188d --- /dev/null +++ b/internal/pkg/policy/sub.go @@ -0,0 +1,117 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. 
Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package policy + +import ( + "github.com/elastic/fleet-server/v7/internal/pkg/model" +) + +type subT struct { + policyId string + agentId string // not logically necessary; cached for logging + revIdx int64 + coordIdx int64 + + next *subT + prev *subT + + ch chan *ParsedPolicy +} + +func NewSub(policyId, agentId string, revIdx, coordIdx int64) *subT { + return &subT{ + policyId: policyId, + agentId: agentId, + revIdx: revIdx, + coordIdx: coordIdx, + ch: make(chan *ParsedPolicy, 1), + } +} + +func makeHead() *subT { + sub := &subT{} + sub.next = sub + sub.prev = sub + return sub +} + +func (n *subT) pushFront(nn *subT) { + nn.next = n.next + nn.prev = n + n.next.prev = nn + n.next = nn +} + +func (n *subT) pushBack(nn *subT) { + nn.next = n + nn.prev = n.prev + n.prev.next = nn + n.prev = nn +} + +func (n *subT) popFront() *subT { + if n.next == n { + return nil + } + s := n.next + s.unlink() + return s +} + +func (n *subT) unlink() bool { + if n.next == nil || n.prev == nil { + return false + } + + n.prev.next = n.next + n.next.prev = n.prev + n.next = nil + n.prev = nil + return true +} + +func (n *subT) isEmpty() bool { + return n.next == n +} + +func (s *subT) isUpdate(policy *model.Policy) bool { + + pRevIdx := policy.RevisionIdx + pCoordIdx := policy.CoordinatorIdx + + return (pRevIdx > s.revIdx && pCoordIdx > 0) || (pRevIdx == s.revIdx && pCoordIdx > s.coordIdx) +} + +// Output returns a new policy that needs to be sent based on the current subscription. +func (sub *subT) Output() <-chan *ParsedPolicy { + return sub.ch +} + +type subIterT struct { + head *subT + cur *subT +} + +func NewIterator(head *subT) *subIterT { + return &subIterT{ + head: head, + cur: head, + } +} + +func (it *subIterT) Next() *subT { + next := it.cur.next + if next == it.head { + return nil + } + it.cur = next + return next +} + +func (it *subIterT) Unlink() { + prev := it.cur.prev + it.cur.unlink() + it.cur = prev +} diff --git a/internal/pkg/policy/sub_test.go b/internal/pkg/policy/sub_test.go new file mode 100644 index 000000000..18aa85f2f --- /dev/null +++ b/internal/pkg/policy/sub_test.go @@ -0,0 +1,241 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build !integration + +package policy + +import ( + "fmt" + "math/rand" + "testing" +) + +// Base case, should be empty +func TestSub_Empty(t *testing.T) { + + head := makeHead() + + if !head.isEmpty() { + t.Error("Expected empty list with only head") + } +} + +// Iteratively pushBack n items up to N. +// Validate order on popFront. 
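The primitives above are easiest to read as a life cycle: a subscription waits on its policy's list, migrates to the pending queue once a newer revision makes it updatable, and can be unlinked in O(1) from whichever list currently holds it. The sketch below walks through that cycle with the functions just defined; the policy ID, agent ID, and revision numbers are invented, and delivery on the subscription channel is elided. The tests that follow exercise the same operations far more thoroughly.

package policy

import "github.com/elastic/fleet-server/v7/internal/pkg/model"

// exampleSubscriptionLifecycle is an illustrative walk-through of the queue
// primitives defined in sub.go; IDs and revisions are made up.
func exampleSubscriptionLifecycle() {
    waiting := makeHead() // per-policy waiting list
    pending := makeHead() // pending (deliverable) queue

    sub := NewSub("policy-A", "agent-1", 1, 1)
    waiting.pushBack(sub)

    // A newer revision arrives: the subscription now needs an update, so it
    // is unlinked from the waiting list and queued for delivery in FIFO order.
    newRev := &model.Policy{PolicyId: "policy-A", RevisionIdx: 2, CoordinatorIdx: 1}
    if sub.isUpdate(newRev) {
        sub.unlink()
        pending.pushBack(sub)
    }

    // The dispatcher drains the pending queue front to back.
    for s := pending.popFront(); s != nil; s = pending.popFront() {
        _ = s // the latest *ParsedPolicy would be sent on s.ch here
    }

    // Unsubscribe simply calls unlink; it is a safe no-op once the node has
    // already been removed from every list.
    sub.unlink()
}
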
+func TestSub_PushBackN(t *testing.T) { + + head := makeHead() + + N := 32 + + for n := 1; n <= N; n++ { + + nodes := make([]*subT, 0, n) + for i := 0; i < n; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushBack(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + // Validate all there + j := 0 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } + + for i := 0; i < n; i++ { + + sub := head.popFront() + if sub.policyId != nodes[i].policyId { + t.Error("misalign on popFront") + } + } + + if !head.isEmpty() { + t.Error("Expect empty list after popFront") + } + + } +} + +// Iteratively pushFront n items up to N. +// Validate order on popFront. +func TestSub_PushFrontN(t *testing.T) { + + head := makeHead() + + N := 32 + + for n := 1; n <= N; n++ { + + nodes := make([]*subT, 0, n) + for i := 0; i < n; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushFront(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + // Validate all there + j := n - 1 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j - 1 + } + + for i := 0; i < n; i++ { + + sub := head.popFront() + if sub.policyId != nodes[n-i-1].policyId { + t.Error("misalign on popFront") + } + } + + if !head.isEmpty() { + t.Error("Expect empty list after popFront") + } + + } +} + +// Push either to front or back randomly. Validate order. +func TestSub_PushRandom(t *testing.T) { + + head := makeHead() + + N := rand.Intn(4096) + 1 + + nodes := make([]*subT, 0, N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + + if rand.Intn(2) == 1 { + head.pushBack(nn) + nodes = append(nodes, nn) + } else { + head.pushFront(nn) + nodes = append([]*subT{nn}, nodes...) + } + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + j := 0 + iter := NewIterator(head) + for sub := iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } +} + +// Generate N nodes. Unlink randomly. +// Validate order on each unlink. +func TestSub_UnlinkRandomN(t *testing.T) { + + head := makeHead() + + N := rand.Intn(4096) + 1 + + nodes := make([]*subT, 0, N) + for i := 0; i < N; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + head.pushBack(nn) + nodes = append(nodes, nn) + } + + if head.isEmpty() { + t.Error("head should not be empty after push") + } + + for i := 0; i < N; i++ { + idx := rand.Intn(len(nodes)) + sub := nodes[idx] + sub.unlink() + nodes = append(nodes[:idx], nodes[idx+1:]...) 
+ + j := 0 + iter := NewIterator(head) + for sub = iter.Next(); sub != nil; sub = iter.Next() { + if sub.policyId != nodes[j].policyId { + t.Error(j, ": misaligned unlink", sub.policyId, nodes[j].policyId) + } + j = j + 1 + } + } + + if !head.isEmpty() { + t.Error("head should be empty") + } +} + +func BenchmarkSubsSimple(b *testing.B) { + + head := makeHead() + nn := NewSub("", "", 0, 0) + for i := 0; i < b.N; i++ { + head.pushBack(nn) + head.popFront() + } +} + +func BenchmarkSubs(b *testing.B) { + benchmarks := []int{ + 32, + 1024, + 2048, + 65536, + 131072, + 524288, + } + + max := benchmarks[len(benchmarks)-1] + + head := makeHead() + subs := make([]*subT, 0, max) + + for i := 0; i < max; i++ { + name := fmt.Sprintf("policy%d", i) + nn := NewSub(name, "", 0, 0) + subs = append(subs, nn) + } + + for _, bm := range benchmarks { + b.Run(fmt.Sprintf("%d", bm), func(b *testing.B) { + + for i := 0; i < b.N; i++ { + for j := 0; j < bm; j++ { + head.pushBack(subs[j]) + } + + for j := 0; j < bm; j++ { + subs[j].unlink() + } + } + + }) + } +} From dfb1ab7fd3f650e8fbbe716207a7c9df2d10d925 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 19 Jul 2021 01:15:52 -0400 Subject: [PATCH 147/240] [Automation] Update elastic stack version to 7.15.0-876ab971 for testing (#567) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 039c8724e..678019a77 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-f74484b5-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-876ab971-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 343889a150d3d560274e2c54485b24ac73958413 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 20 Jul 2021 01:16:47 -0400 Subject: [PATCH 148/240] [Automation] Update elastic stack version to 7.15.0-af619a87 for testing (#573) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 678019a77..d6c570458 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-876ab971-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-af619a87-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 20f6377d4c89382de12ac67a2514434687f9d8e6 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 21 Jul 2021 01:17:56 -0400 Subject: [PATCH 149/240] [Automation] Update elastic stack version to 7.15.0-c23a5439 for testing (#578) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index d6c570458..9096233e0 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-af619a87-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-c23a5439-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 74db288d3431123d8b2f35b02489249f54d5741b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" 
<37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 21 Jul 2021 12:29:12 +0000 Subject: [PATCH 150/240] Add action timeout (#575) (#579) (cherry picked from commit 070638c59628b707f69fcc3b6001a06fb993676a) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 1 + cmd/fleet/schema.go | 1 + internal/pkg/es/mapping.go | 3 +++ internal/pkg/model/schema.go | 3 +++ model/schema.json | 4 ++++ 5 files changed, 12 insertions(+) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 56718722d..ff45a3449 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -388,6 +388,7 @@ func convertActions(agentId string, actions []model.Action) ([]ActionResp, strin Id: action.ActionId, Type: action.Type, InputType: action.InputType, + Timeout: action.Timeout, }) } diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index 8c28436e7..f8f300b46 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -98,6 +98,7 @@ type ActionResp struct { Id string `json:"id"` Type string `json:"type"` InputType string `json:"input_type"` + Timeout int64 `json:"timeout,omitempty"` } type Event struct { diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index 6cca74500..ccc0a6b51 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -27,6 +27,9 @@ const ( "input_type": { "type": "keyword" }, + "timeout": { + "type": "integer" + }, "@timestamp": { "type": "date" }, diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index e93e95684..58728bae9 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -48,6 +48,9 @@ type Action struct { // The input type the actions should be routed to. InputType string `json:"input_type,omitempty"` + // The optional action timeout in seconds + Timeout int64 `json:"timeout,omitempty"` + // Date/time the action was created Timestamp string `json:"@timestamp,omitempty"` diff --git a/model/schema.json b/model/schema.json index 8def4eca5..eaf3e872c 100644 --- a/model/schema.json +++ b/model/schema.json @@ -38,6 +38,10 @@ "description": "The input type the actions should be routed to.", "type": "string" }, + "timeout": { + "description": "The optional action timeout in seconds", + "type": "integer" + }, "user_id": { "description": "The ID of the user who created the action.", "type": "string" From b7f7774049811937a78828a946baaee43dae9695 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 22 Jul 2021 01:16:54 -0400 Subject: [PATCH 151/240] [Automation] Update elastic stack version to 7.15.0-192e1003 for testing (#580) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 9096233e0..35dfe3b27 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-c23a5439-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-192e1003-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From d0bf8272daa2fd975c1d9500c1da92883d393dbf Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 23 Jul 2021 01:18:30 -0400 Subject: [PATCH 152/240] [Automation] Update elastic stack version to 7.15.0-5e783247 for testing (#584) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 
deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 35dfe3b27..facfa9c82 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-192e1003-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-5e783247-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 5bdc198a53bb57824ba2f21bb991f76ad1549208 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 26 Jul 2021 01:18:28 -0400 Subject: [PATCH 153/240] [Automation] Update elastic stack version to 7.15.0-514bcfaf for testing (#587) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index facfa9c82..2fa3e2d18 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-5e783247-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-514bcfaf-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 0900e1d03d02a87259a34c0a081f24a712c7efe4 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 27 Jul 2021 01:18:22 -0400 Subject: [PATCH 154/240] [Automation] Update elastic stack version to 7.15.0-6e66e5d1 for testing (#589) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 2fa3e2d18..63a9d228b 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-514bcfaf-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-6e66e5d1-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 0c9273607bd13c8e06589db98ca374ab14671acf Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 27 Jul 2021 14:55:02 +0000 Subject: [PATCH 155/240] [7.x](backport #592) Updates elastic agent client dependency (#593) * update (cherry picked from commit 4e62b67ee86a33f3b35299f6e54744002b24f3e1) * sum (cherry picked from commit ae38a8e3c53796d26a9f6b4b3d2fa78da792825d) Co-authored-by: Michal Pristas --- NOTICE.txt | 4 ++-- go.mod | 2 +- go.sum | 3 ++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/NOTICE.txt b/NOTICE.txt index 101441a89..5e0a378ef 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -250,11 +250,11 @@ License Version 2.0. 
-------------------------------------------------------------------------------- Dependency : github.com/elastic/elastic-agent-client/v7 -Version: v7.0.0-20200709172729-d43b7ad5833a +Version: v7.0.0-20210727140539-f0905d9377f6 Licence type (autodetected): Elastic -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20200709172729-d43b7ad5833a/LICENSE.txt: +Contents of probable licence file $GOMODCACHE/github.com/elastic/elastic-agent-client/v7@v7.0.0-20210727140539-f0905d9377f6/LICENSE.txt: ELASTIC LICENSE AGREEMENT diff --git a/go.mod b/go.mod index 7b12f8f3f..33d2f19f2 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ require ( github.com/Pallinder/go-randomdata v1.2.0 github.com/dgraph-io/ristretto v0.1.0 github.com/elastic/beats/v7 v7.11.1 - github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a + github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 github.com/elastic/go-elasticsearch/v7 v7.13.1 github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible diff --git a/go.sum b/go.sum index 516e38661..74b0ce1a1 100644 --- a/go.sum +++ b/go.sum @@ -246,8 +246,9 @@ github.com/elastic/beats/v7 v7.11.1 h1:eYJRKc/mA6rhQNujUV9lUADQ0S9SZvI5d782BnNvg github.com/elastic/beats/v7 v7.11.1/go.mod h1:2gJ+JvWjTYuMA37chVSfsolz7Z2ca+gL39HpmSLO+z8= github.com/elastic/ecs v1.6.0 h1:8NmgfnsjmKXh9hVsK3H2tZtfUptepNc3msJOAynhtmc= github.com/elastic/ecs v1.6.0/go.mod h1:pgiLbQsijLOJvFR8OTILLu0Ni/R/foUNg0L+T6mU9b4= -github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a h1:2NHgf1RUw+f240lpTnLrCp1aBNvq2wDi0E1A423/S1k= github.com/elastic/elastic-agent-client/v7 v7.0.0-20200709172729-d43b7ad5833a/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 h1:nFvXHBjYK3e9+xF0WKDeAKK4aOO51uC28s+L9rBmilo= +github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6/go.mod h1:uh/Gj9a0XEbYoM4NYz4LvaBVARz3QXLmlNjsrKY9fTc= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQVCpGSRXmLqjEHpJKbR60rxh1nQZY4= github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= From ba8ae15494853b37d1f24dea6cf3a8d5e18e2993 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 28 Jul 2021 01:18:43 -0400 Subject: [PATCH 156/240] [Automation] Update elastic stack version to 7.15.0-bd471fd6 for testing (#597) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 63a9d228b..9593a331b 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-6e66e5d1-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-bd471fd6-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From cfd3564d098d410ca44eb9a47218af8f3493b243 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 29 Jul 2021 01:18:24 -0400 Subject: [PATCH 157/240] [Automation] Update elastic stack version to 
7.15.0-2cefb6a4 for testing (#600) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 9593a331b..91f71d109 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-bd471fd6-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-2cefb6a4-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 4bc5091ba69c6c29ab1e429ec508d58b12b7937e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 2 Aug 2021 22:03:57 +0000 Subject: [PATCH 158/240] [7.x](backport #610) Improve version checking for 8.0.0-alpha1 (#611) * Improve version checking. (cherry picked from commit f99312d851dc6b934016bf6afbf2481170fc6348) * Fix alpha check on elasticsearch version. (cherry picked from commit 92285c1fc7bc5b2ba55fcfcc4e2c2e760d35f424) Co-authored-by: Blake Rouse --- cmd/fleet/userAgent.go | 4 ++-- cmd/fleet/userAgent_test.go | 30 ++++++++++++++++++++++++++++++ internal/pkg/ver/check.go | 2 +- internal/pkg/ver/check_test.go | 12 ++++++++++++ 4 files changed, 45 insertions(+), 3 deletions(-) diff --git a/cmd/fleet/userAgent.go b/cmd/fleet/userAgent.go index 1773be60f..51b3bbec2 100644 --- a/cmd/fleet/userAgent.go +++ b/cmd/fleet/userAgent.go @@ -66,8 +66,8 @@ func validateUserAgent(r *http.Request, verConst version.Constraints) error { if !strings.HasPrefix(userAgent, userAgentPrefix) { return ErrInvalidUserAgent } - verStr := strings.TrimSpace(strings.TrimSuffix(strings.TrimPrefix(userAgent, userAgentPrefix), "-snapshot")) - ver, err := version.NewVersion(verStr) + verSep := strings.Split(strings.TrimPrefix(userAgent, userAgentPrefix), "-") + ver, err := version.NewVersion(verSep[0]) if err != nil { return ErrInvalidUserAgent } diff --git a/cmd/fleet/userAgent_test.go b/cmd/fleet/userAgent_test.go index e9c8d9926..376a1f769 100644 --- a/cmd/fleet/userAgent_test.go +++ b/cmd/fleet/userAgent_test.go @@ -77,6 +77,36 @@ func TestValidateUserAgent(t *testing.T) { verCon: mustBuildConstraints("8.0.0"), err: nil, }, + { + userAgent: "eLaStIc AGeNt v7.13.0", + verCon: mustBuildConstraints("8.0.0-alpha1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-alpha1", + verCon: mustBuildConstraints("8.0.0-alpha1"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-alpha1", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v8.0.0-anything", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.15.0-anything", + verCon: mustBuildConstraints("8.0.0"), + err: nil, + }, + { + userAgent: "eLaStIc AGeNt v7.15.0-anything", + verCon: mustBuildConstraints("8.0.0-beta1"), + err: nil, + }, } for _, tr := range tests { t.Run(tr.userAgent, func(t *testing.T) { diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go index 84a87d7f7..3cd0ec088 100644 --- a/internal/pkg/ver/check.go +++ b/internal/pkg/ver/check.go @@ -83,7 +83,7 @@ func minimizePatch(ver *version.Version) string { } func parseVersion(sver string) (*version.Version, error) { - ver, err := version.NewVersion(sver) + ver, err := version.NewVersion(strings.Split(sver, "-")[0]) if err != nil { return nil, fmt.Errorf("%v: %w", err, ErrMalformedVersion) } diff --git a/internal/pkg/ver/check_test.go b/internal/pkg/ver/check_test.go index 03f49fc7a..0a95c7477 100644 --- 
a/internal/pkg/ver/check_test.go +++ b/internal/pkg/ver/check_test.go @@ -70,6 +70,18 @@ func TestCheckCompatibilityInternal(t *testing.T) { esVersion: "7.18.0", err: ErrUnsupportedVersion, }, + { + name: "supported elasticsearch 800a1", + fleetVersion: "8.0.0-alpha1", + esVersion: "8.0.0-alpha1", + err: nil, + }, + { + name: "supported elasticsearch 715-800a1", + fleetVersion: "7.15.2", + esVersion: "8.0.0-alpha1", + err: nil, + }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { From 045ebadf0aa347a34c6477dbcab8947710e286c6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 3 Aug 2021 17:21:16 +0000 Subject: [PATCH 159/240] Populate agent.id field in .fleet-agents index (#609) (#614) * Populate agent.id field in .fleet-agents index * Address feedback on the draft * Put back the version spaces trimming that was lost in the master changes (cherry picked from commit cb77724a20fd351a7179bd96a32aca0319a000a6) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 11 ++++++++--- cmd/fleet/handleEnroll.go | 10 +++++++--- cmd/fleet/userAgent.go | 25 +++++++++++++++++-------- cmd/fleet/userAgent_test.go | 2 +- internal/pkg/checkin/bulk.go | 12 ++++++++++-- internal/pkg/checkin/bulk_test.go | 14 ++++++++++++-- internal/pkg/dl/constants.go | 1 + 7 files changed, 56 insertions(+), 19 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index ff45a3449..5d5d1e379 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -152,11 +152,16 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r return err } - err = validateUserAgent(r, ct.verCon) + ver, err := validateUserAgent(r, ct.verCon) if err != nil { return err } + var newVer string + if ver != agent.Agent.Version { + newVer = ver + } + // Metrics; serenity now. 
dfunc := cntCheckin.IncStart() defer dfunc() @@ -225,7 +230,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed - ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno) + ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno, newVer) // Initial fetch for pending actions var ( @@ -262,7 +267,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r zlog.Trace().Msg("fire long poll") break LOOP case <-tick.C: - ct.bc.CheckIn(agent.Id, req.Status, nil, nil) + ct.bc.CheckIn(agent.Id, req.Status, nil, nil, newVer) } } } diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index b25dcf874..3b5538fa4 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -135,7 +135,7 @@ func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*Enro return nil, err } - err = validateUserAgent(r, et.verCon) + ver, err := validateUserAgent(r, et.verCon) if err != nil { return nil, err } @@ -167,10 +167,10 @@ func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*Enro cntEnroll.bodyIn.Add(readCounter.Count()) - return _enroll(r.Context(), et.bulker, et.cache, *req, *erec) + return _enroll(r.Context(), et.bulker, et.cache, *req, *erec, ver) } -func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollRequest, erec model.EnrollmentApiKey) (*EnrollResponse, error) { +func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollRequest, erec model.EnrollmentApiKey, ver string) (*EnrollResponse, error) { if req.SharedId != "" { // TODO: Support pre-existing install @@ -210,6 +210,10 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq LocalMetadata: localMeta, AccessApiKeyId: accessApiKey.Id, ActionSeqNo: []int64{sqn.UndefinedSeqNo}, + Agent: &model.AgentMetadata{ + Id: agentId, + Version: ver, + }, } err = createFleetAgent(ctx, bulker, agentId, agentData) diff --git a/cmd/fleet/userAgent.go b/cmd/fleet/userAgent.go index 51b3bbec2..76feb425f 100644 --- a/cmd/fleet/userAgent.go +++ b/cmd/fleet/userAgent.go @@ -57,22 +57,31 @@ func maximizePatch(ver *version.Version) string { // validateUserAgent validates that the User-Agent of the connecting Elastic Agent is valid and that the version is // supported for this Fleet Server. 
-func validateUserAgent(r *http.Request, verConst version.Constraints) error { +func validateUserAgent(r *http.Request, verConst version.Constraints) (string, error) { userAgent := r.Header.Get("User-Agent") if userAgent == "" { - return ErrInvalidUserAgent + return "", ErrInvalidUserAgent } userAgent = strings.ToLower(userAgent) if !strings.HasPrefix(userAgent, userAgentPrefix) { - return ErrInvalidUserAgent + return "", ErrInvalidUserAgent } - verSep := strings.Split(strings.TrimPrefix(userAgent, userAgentPrefix), "-") - ver, err := version.NewVersion(verSep[0]) + + // Trim "elastic agent " prefix + s := strings.TrimPrefix(userAgent, userAgentPrefix) + + // Split the version to accommodate versions with suffixes such as v8.0.0-snapshot v8.0.0-alpha1 + verSep := strings.Split(s, "-") + + // Trim leading and traling spaces + verStr := strings.TrimSpace(verSep[0]) + + ver, err := version.NewVersion(verStr) if err != nil { - return ErrInvalidUserAgent + return "", ErrInvalidUserAgent } if !verConst.Check(ver) { - return ErrUnsupportedVersion + return "", ErrUnsupportedVersion } - return nil + return ver.String(), nil } diff --git a/cmd/fleet/userAgent_test.go b/cmd/fleet/userAgent_test.go index 376a1f769..671d412a5 100644 --- a/cmd/fleet/userAgent_test.go +++ b/cmd/fleet/userAgent_test.go @@ -112,7 +112,7 @@ func TestValidateUserAgent(t *testing.T) { t.Run(tr.userAgent, func(t *testing.T) { req := httptest.NewRequest("GET", "/", nil) req.Header.Set("User-Agent", tr.userAgent) - res := validateUserAgent(req, tr.verCon) + _, res := validateUserAgent(req, tr.verCon) if tr.err != res { t.Fatalf("err mismatch: %v != %v", tr.err, res) } diff --git a/internal/pkg/checkin/bulk.go b/internal/pkg/checkin/bulk.go index ec9bdc937..17a2c6b93 100644 --- a/internal/pkg/checkin/bulk.go +++ b/internal/pkg/checkin/bulk.go @@ -34,6 +34,7 @@ func WithFlushInterval(d time.Duration) Opt { type extraT struct { meta []byte seqNo sqn.SeqNo + ver string } // Minimize the size of this structure. @@ -94,16 +95,17 @@ func (bc *Bulk) timestamp() string { // WARNING: Bulk will take ownership of fields, // so do not use after passing in. -func (bc *Bulk) CheckIn(id string, status string, meta []byte, seqno sqn.SeqNo) error { +func (bc *Bulk) CheckIn(id string, status string, meta []byte, seqno sqn.SeqNo, newVer string) error { // Separate out the extra data to minimize // the memory footprint of the 90% case of just // updating the timestamp. 
var extra *extraT - if meta != nil || seqno.IsSet() { + if meta != nil || seqno.IsSet() || newVer != "" { extra = &extraT{ meta: meta, seqNo: seqno, + ver: newVer, } } @@ -192,6 +194,12 @@ func (bc *Bulk) flush(ctx context.Context) error { dl.FieldLastCheckinStatus: pendingData.status, // Set the pending status } + // If the agent version is not empty it needs to be updated + // Assuming the agent can by upgraded keeping the same id, but incrementing the version + if pendingData.extra.ver != "" { + fields[dl.FieldAgentVersion] = pendingData.extra.ver + } + // Update local metadata if provided if pendingData.extra.meta != nil { // Surprise: The json encodeer compacts this raw JSON during diff --git a/internal/pkg/checkin/bulk_test.go b/internal/pkg/checkin/bulk_test.go index 897450242..8a11a1ff3 100644 --- a/internal/pkg/checkin/bulk_test.go +++ b/internal/pkg/checkin/bulk_test.go @@ -43,12 +43,14 @@ func TestBulkSimple(t *testing.T) { bc := NewBulk(&mockBulk) + const ver = "8.0.0" cases := []struct { desc string id string status string meta []byte seqno sqn.SeqNo + ver string }{ { "Simple case", @@ -56,6 +58,7 @@ func TestBulkSimple(t *testing.T) { "online", nil, nil, + "", }, { "Singled field case", @@ -63,6 +66,7 @@ func TestBulkSimple(t *testing.T) { "online", []byte(`{"hey":"now"}`), nil, + "", }, { "Multi field case", @@ -70,6 +74,7 @@ func TestBulkSimple(t *testing.T) { "online", []byte(`{"hey":"now","brown":"cow"}`), nil, + ver, }, { "Multi field nested case", @@ -77,6 +82,7 @@ func TestBulkSimple(t *testing.T) { "online", []byte(`{"hey":"now","wee":{"little":"doggie"}}`), nil, + "", }, { "Simple case with seqNo", @@ -84,6 +90,7 @@ func TestBulkSimple(t *testing.T) { "online", nil, sqn.SeqNo{1, 2, 3, 4}, + ver, }, { "Field case with seqNo", @@ -91,6 +98,7 @@ func TestBulkSimple(t *testing.T) { "online", []byte(`{"uncle":"fester"}`), sqn.SeqNo{5, 6, 7, 8}, + ver, }, { "Unusual status", @@ -98,6 +106,7 @@ func TestBulkSimple(t *testing.T) { "unusual", nil, nil, + "", }, { "Empty status", @@ -105,13 +114,14 @@ func TestBulkSimple(t *testing.T) { "", nil, nil, + "", }, } for _, c := range cases { t.Run(c.desc, func(t *testing.T) { - if err := bc.CheckIn(c.id, c.status, c.meta, c.seqno); err != nil { + if err := bc.CheckIn(c.id, c.status, c.meta, c.seqno, c.ver); err != nil { t.Fatal(err) } @@ -205,7 +215,7 @@ func benchmarkBulk(n int, flush bool, b *testing.B) { for i := 0; i < b.N; i++ { for _, id := range ids { - err := bc.CheckIn(id, "", nil, nil) + err := bc.CheckIn(id, "", nil, nil, "") if err != nil { b.Fatal(err) } diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 7a1ed13d0..725fad1aa 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -40,6 +40,7 @@ const ( FieldDefaultApiKeyId = "default_api_key_id" FieldPolicyOutputPermissionsHash = "policy_output_permissions_hash" FieldUnenrolledReason = "unenrolled_reason" + FieldAgentVersion = "agent.version" FieldActive = "active" FieldUpdatedAt = "updated_at" From f3ce32b42f19c0c99d14dae80d8d9da8d5842a5c Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 5 Aug 2021 18:36:22 +0000 Subject: [PATCH 160/240] Add additional diagnostic logging to trace policy assignment issues. 
(#621) (cherry picked from commit 1bbd89c7f8dc06402531070dc65c1b10ba4abeb9) Co-authored-by: Sean Cunningham --- cmd/fleet/handleCheckin.go | 11 ++++++ cmd/fleet/main.go | 53 ++++++++++++++++++++++++++--- internal/pkg/coordinator/monitor.go | 4 ++- 3 files changed, 63 insertions(+), 5 deletions(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 5d5d1e379..c990315a1 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -272,6 +272,17 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r } } + for _, action := range actions { + zlog.Info(). + Str("ackToken", ackToken). + Str("createdAt", action.CreatedAt). + Str("id", action.Id). + Str("type", action.Type). + Str("inputType", action.InputType). + Int64("timeout", action.Timeout). + Msg("Action delivered to agent on checkin") + } + resp := CheckinResponse{ AckToken: ackToken, Action: "checkin", diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 6d6f5c00c..c965d842b 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -9,6 +9,7 @@ import ( "fmt" "io" "os" + "reflect" "runtime/debug" "sync" "time" @@ -471,9 +472,10 @@ LOOP: // Create or recreate cache if configCacheChanged(curCfg, newCfg) { + log.Info().Msg("reconfigure cache on configuration change") cacheCfg := makeCacheConfig(newCfg) err := f.cache.Reconfigure(cacheCfg) - log.Info().Err(err).Interface("cfg", cacheCfg).Msg("Reconfigure cache") + log.Info().Err(err).Interface("cfg", cacheCfg).Msg("reconfigure cache complete") if err != nil { return err } @@ -481,9 +483,13 @@ LOOP: // Start or restart profiler if configChangedProfiler(curCfg, newCfg) { - stop(proCancel, proEg) + if proCancel != nil { + log.Info().Msg("stopping profiler on configuration change") + stop(proCancel, proEg) + } proEg, proCancel = nil, nil if newCfg.Inputs[0].Server.Profiler.Enabled { + log.Info().Msg("starting profiler on configuration change") proEg, proCancel = start(ctx, func(ctx context.Context) error { return profile.RunProfiler(ctx, newCfg.Inputs[0].Server.Profiler.Bind) }, ech) @@ -492,7 +498,11 @@ LOOP: // Start or restart server if configChangedServer(curCfg, newCfg) { - stop(srvCancel, srvEg) + if srvCancel != nil { + log.Info().Msg("stopping server on configuration change") + stop(srvCancel, srvEg) + } + log.Info().Msg("starting server on configuration change") srvEg, srvCancel = start(ctx, func(ctx context.Context) error { return f.runServer(ctx, newCfg) }, ech) @@ -541,8 +551,43 @@ func configChangedProfiler(curCfg, newCfg *config.Config) bool { return changed } +func redactServerCfg(cfg *config.Config) config.Server { + const kRedacted = "[redacted]" + redacted := cfg.Inputs[0].Server + + if redacted.TLS != nil { + newTLS := *redacted.TLS + + if newTLS.Certificate.Key != "" { + newTLS.Certificate.Key = kRedacted + } + if newTLS.Certificate.Passphrase != "" { + newTLS.Certificate.Passphrase = kRedacted + } + + redacted.TLS = &newTLS + } + + return redacted +} + func configChangedServer(curCfg, newCfg *config.Config) bool { - return curCfg == nil || curCfg.Inputs[0].Server != newCfg.Inputs[0].Server + + zlog := log.With().Interface("new", redactServerCfg(newCfg)).Logger() + + changed := true + switch { + case curCfg == nil: + zlog.Info().Msg("initial server configuration") + case !reflect.DeepEqual(curCfg.Inputs[0].Server, newCfg.Inputs[0].Server): + zlog.Info(). + Interface("old", redactServerCfg(curCfg)). 
+ Msg("server configuration has changed") + default: + changed = false + } + + return changed } func configCacheChanged(curCfg, newCfg *config.Config) bool { diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 391ac429e..e1ee47615 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -396,8 +396,9 @@ func (m *monitorT) rescheduleUnenroller(ctx context.Context, pt *policyT, p *mod } func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d time.Duration) { + cnt := 0 for { - l.Info().Str("coordinator", cord.Name()).Msg("starting coordinator for policy") + l.Info().Int("count", cnt).Str("coordinator", cord.Name()).Msg("starting coordinator for policy") err := cord.Run(ctx) if err != context.Canceled { l.Err(err).Msg("coordinator failed") @@ -407,6 +408,7 @@ func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d t } else { break } + cnt += 1 } } From 8f33cdaae87e65c0c2648b262fbe606aa3782c1b Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Fri, 6 Aug 2021 19:10:56 +0000 Subject: [PATCH 161/240] Additional logs to track policy change. (#625) (cherry picked from commit 15cb7809c45b88edc565277bea038eb486a0836b) Co-authored-by: Sean Cunningham --- cmd/fleet/handleAck.go | 7 +++++++ cmd/fleet/handleCheckin.go | 2 ++ 2 files changed, 9 insertions(+) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 7048d1f23..c19e1a97e 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -237,6 +237,13 @@ func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, act bulk.WithRetryOnConflict(3), ) + log.Info().Err(err). + Str("agentId", agent.Id). + Str("policyId", agent.PolicyId). + Int64("policyRevision", currRev). + Int64("policyCoordinator", currCoord). + Msg("Policy ACK") + return errors.Wrap(err, "handlePolicyChange update") } diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index c990315a1..2ef6fc1f9 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -423,6 +423,8 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a zlog = zlog.With(). Str("ctx", "processPolicy"). + Int64("policyRevision", pp.Policy.RevisionIdx). + Int64("policyCoordinator", pp.Policy.CoordinatorIdx). Str("policyId", pp.Policy.PolicyId). 
Logger() From d267c4dee088944ca2012a815fcea0eafcaa39ec Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 11 Aug 2021 01:16:46 -0400 Subject: [PATCH 162/240] [Automation] Update elastic stack version to 7.15.0-83f2fe8a for testing (#630) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 91f71d109..8a79b172c 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-2cefb6a4-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-83f2fe8a-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From e5f361aa3758b7218ae839913571d72cda3303a4 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 12 Aug 2021 01:19:42 -0400 Subject: [PATCH 163/240] [Automation] Update elastic stack version to 7.15.0-af472020 for testing (#631) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 8a79b172c..365c699c8 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-83f2fe8a-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-af472020-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From e4875751ef54fa99ad1e9eda7d8c398f459d5917 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 13 Aug 2021 01:19:16 -0400 Subject: [PATCH 164/240] [Automation] Update elastic stack version to 7.15.0-ba42231a for testing (#635) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 365c699c8..2cfe6feda 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-af472020-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-ba42231a-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From aabbff619aa1ebc515a358f9b41dcf699cf4a1d0 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 18 Aug 2021 01:19:37 -0400 Subject: [PATCH 165/240] [Automation] Update elastic stack version to 7.15.0-4b8a78a4 for testing (#641) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 2cfe6feda..7d2396c3d 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-ba42231a-SNAPSHOT +ELASTICSEARCH_VERSION=7.15.0-4b8a78a4-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From c237057a80bd8ffab26575c571cfa4ac50a7693e Mon Sep 17 00:00:00 2001 From: Blake Rouse Date: Wed, 18 Aug 2021 09:43:58 -0400 Subject: [PATCH 166/240] Bump to 7.16. 
(#642) --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 519d01a9a..bf9b0e0b0 100644 --- a/main.go +++ b/main.go @@ -16,7 +16,7 @@ import ( "github.com/elastic/fleet-server/v7/cmd/fleet" ) -const defaultVersion = "7.15.0" +const defaultVersion = "7.16.0" var ( Version string = defaultVersion From 7e6ebb3782724842aeee5f4415c280afde54ea04 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 20 Aug 2021 01:19:52 -0400 Subject: [PATCH 167/240] [Automation] Update elastic stack version to 7.16.0-ab6de48d for testing (#647) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 7d2396c3d..9ce13451c 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.15.0-4b8a78a4-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-ab6de48d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 5c9f05d6179bc6c15e23a562f46ab8bac5681743 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 23 Aug 2021 01:19:59 -0400 Subject: [PATCH 168/240] [Automation] Update elastic stack version to 7.16.0-2d73c7e8 for testing (#653) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 9ce13451c..6b16c0896 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-ab6de48d-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-2d73c7e8-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 1706e6da7b77062bfc611966b80b291703bef39f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 24 Aug 2021 01:20:05 -0400 Subject: [PATCH 169/240] [Automation] Update elastic stack version to 7.16.0-c3b80e7b for testing (#656) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 6b16c0896..fca3ee4a5 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-2d73c7e8-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-c3b80e7b-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f42cecb0268f8b7f0d0d19eb56746f13ae454962 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 25 Aug 2021 01:19:42 -0400 Subject: [PATCH 170/240] [Automation] Update elastic stack version to 7.16.0-13c65289 for testing (#658) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index fca3ee4a5..f7a605c8d 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-c3b80e7b-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-13c65289-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file 
From 4c33706e35f5f28d2421a5d0c750a84dc0ceaf07 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 26 Aug 2021 01:19:13 -0400 Subject: [PATCH 171/240] [Automation] Update elastic stack version to 7.16.0-6320ab58 for testing (#662) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index f7a605c8d..0e078f670 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-13c65289-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-6320ab58-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f88e471d044936f8fff943b04d1545e4df5396e9 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 27 Aug 2021 01:19:46 -0400 Subject: [PATCH 172/240] [Automation] Update elastic stack version to 7.16.0-d98a671f for testing (#667) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 0e078f670..3bcd2cc4c 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-6320ab58-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-d98a671f-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 5778d0460ccbe7dca21d484a6152020f6b4b1a52 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 30 Aug 2021 01:19:16 -0400 Subject: [PATCH 173/240] [Automation] Update elastic stack version to 7.16.0-d17acf1e for testing (#669) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3bcd2cc4c..52af908bc 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-d98a671f-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-d17acf1e-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 6097637715c48e22373f6c93879208ab9203ebba Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 30 Aug 2021 14:01:15 +0000 Subject: [PATCH 174/240] Improve PR description template (#591) (#673) One area I think we can improve as a team is on descriptions of pull requests. A pull request description should not only reference an issue that it solves but also be able to stand on its own. It is important to describe how a certain problem is solved. What approach was taken and why. This allows anyone reviewing a PR to build a mental model of what change is in the pull request and whether it aligns with the described solution. There are cases where the PR description takes more time than the actual fix in code. Anyone reading the PR description should understand why the change was made and what the change contains. I opened this to discuss a change to the template with these two questions at the top. The template itself, from my perspective, is mostly meant as a guideline/helper, but it should be up to each engineer to decide what is really needed for this PR.
This PR is made in the fleet-server repository as a first trial, but the idea is to use the same template for Beats and other repositories. (cherry picked from commit 0d1a384c45047e7f75acd57129ea2b0eeb6b343c) Co-authored-by: Nicolas Ruflin --- .github/PULL_REQUEST_TEMPLATE.md | 57 +++++++------------------------- 1 file changed, 12 insertions(+), 45 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index ef4ecda88..934167b94 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -8,45 +8,33 @@ Please label this PR with one of the following labels, depending on the scope of - Docs --> -## What does this PR do? +## What is the problem this PR solves? - +// Please do not just reference an issue. Explain WHAT the problem this PR solves here. -## Why is it important? +## How does this PR solve the problem? - ## Checklist -- [ ] My code follows the style guidelines of this project - [ ] I have commented my code, particularly in hard-to-understand areas - [ ] I have made corresponding changes to the documentation - [ ] I have made corresponding change to the default configuration files - [ ] I have added tests that prove my fix is effective or that my feature works - [ ] I have added an entry in `CHANGELOG.next.asciidoc` or `CHANGELOG-developer.next.asciidoc`. -## Author's Checklist - - -- [ ] - -## How to test this PR locally - - ## Related issues @@ -57,25 +45,4 @@ Link related issues below. Insert the issue link or reference after the word "Cl - Relates #123 - Requires #123 - Superseds #123 ---> -- - -## Use cases - - - -## Screenshots - - - -## Logs - - +--> \ No newline at end of file From c000772a7788c7a42fa895fdc155cfa443b58e80 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 31 Aug 2021 01:19:37 -0400 Subject: [PATCH 175/240] [Automation] Update elastic stack version to 7.16.0-978c8c33 for testing (#676) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 52af908bc..c9a4dbe80 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-d17acf1e-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-978c8c33-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 775660ce02479e9dd8edf66ccff2e8fb9e3a6128 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 31 Aug 2021 12:15:27 +0000 Subject: [PATCH 176/240] Add index refresh call after fetch if the list of hits has holes (#665) (#679) * Add index refresh call after fetch if the list of hits has holes After discussion with the Elasticsearch team, it looks like the current global checkpoints API does not guarantee the list of documents without holes on subsequent index search, and there is still a slim chance of this with our current implementation. The Elasticsearch team is working on getting the refresh implemented on their side, and for now recommended doing the refresh before the fetch if there are holes detected in the result. Added the check for the holes and here are the steps: 1. Call Global checkpoints = 5 2. Search = 1, 2, 3, 5. 3. Manual refresh 4. Search and get 4,5 5.
Return to step 1 * Add the hole in the beginning check * Fix spelling error in the comment * Updated the comment for "refresh" code, added reference to Elasticsearch ticket (cherry picked from commit d10d0c675f4e65d6b8ab1dec2b2975fa573b0046) Co-authored-by: Aleksandr Maus --- internal/pkg/monitor/monitor.go | 103 +++++++++++++++++++++++++-- internal/pkg/monitor/monitor_test.go | 88 +++++++++++++++++++++++ 2 files changed, 186 insertions(+), 5 deletions(-) create mode 100644 internal/pkg/monitor/monitor_test.go diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index b1c53b5cd..cc606297f 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -259,19 +259,81 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { } } + // This is an example of steps for fetching the documents without "holes" (not-yet-indexed documents in between) + // as recommended by Elasticsearch team on August 25th, 2021 + // 1. Call Global checkpoints = 5 + // 2. Search = 1, 2, 3, 5. + // 3. Manual refresh + // 4. Search and get 4,5 + // 5. Return to step 1 + // Fetch up to known checkpoint count := m.fetchSize for count == m.fetchSize { - hits, err := m.fetch(ctx, newCheckpoint) + hits, err := m.fetch(ctx, checkpoint, newCheckpoint) if err != nil { m.log.Error().Err(err).Msg("failed checking new documents") break } + + // Check if the list of hits has holes + if hasHoles(checkpoint, hits) { + m.log.Debug().Msg("hits list has holes, refresh index") + err = m.refresh(ctx) + if err != nil { + m.log.Error().Err(err).Msg("failed to refresh index") + break + } + + // Refetch + hits, err = m.fetch(ctx, checkpoint, newCheckpoint) + if err != nil { + m.log.Error().Err(err).Msg("failed checking new documents after refresh") + break + } + } + + // Notify call updates checkpoint count = m.notify(ctx, hits) + + // Get the latest checkpoint for the next fetch iteration + if count == m.fetchSize { + checkpoint = m.loadCheckpoint() + } } } } +func hasHoles(checkpoint sqn.SeqNo, hits []es.HitT) bool { + sz := len(hits) + if sz == 0 { + return false + } + + // Check if the hole is in the beginning of hits + seqNo := checkpoint.Value() + if seqNo != sqn.UndefinedSeqNo && (hits[0].SeqNo-seqNo) > 1 { + return true + } + + // No holes in the beginning, check if size <= 1 then there is no holes + if sz <= 1 { + return false + } + + // Set initial seqNo value from the last hit in the array + seqNo = hits[sz-1].SeqNo + + // Iterate from the end since that's where it more likely to have holes + for i := sz - 2; i >= 0; i-- { + if (seqNo - hits[i].SeqNo) > 1 { + return true + } + seqNo = hits[i].SeqNo + } + return false +} + func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { sz := len(hits) if sz > 0 { @@ -286,11 +348,9 @@ func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { return 0 } -func (m *simpleMonitorT) fetch(ctx context.Context, maxCheckpoint sqn.SeqNo) ([]es.HitT, error) { +func (m *simpleMonitorT) fetch(ctx context.Context, checkpoint, maxCheckpoint sqn.SeqNo) ([]es.HitT, error) { now := time.Now().UTC().Format(time.RFC3339) - checkpoint := m.loadCheckpoint() - // Run check query that detects that there are new documents available params := map[string]interface{}{ dl.FieldSeqNo: checkpoint.Value(), @@ -308,6 +368,39 @@ func (m *simpleMonitorT) fetch(ctx context.Context, maxCheckpoint sqn.SeqNo) ([] return hits, nil } +// Refreshes index. 
This is temporary code +// TODO: Remove this when the refresh is properly implemented on Eleasticsearch side +// The issue for "refresh" falls under phase 2 of https://github.com/elastic/elasticsearch/issues/71449. +// Once the phase 2 is complete we can remove the refreshes from fleet-server. +func (m *simpleMonitorT) refresh(ctx context.Context) error { + res, err := m.esCli.Indices.Refresh( + m.esCli.Indices.Refresh.WithContext(ctx), + m.esCli.Indices.Refresh.WithIndex(m.index), + ) + if err != nil { + return err + } + defer res.Body.Close() + var esres es.Response + err = json.NewDecoder(res.Body).Decode(&esres) + if err != nil { + return err + } + + if res.IsError() { + err = es.TranslateError(res.StatusCode, &esres.Error) + } + + if err != nil { + if errors.Is(err, es.ErrIndexNotFound) { + m.log.Debug().Msg(es.ErrIndexNotFound.Error()) + return nil + } + return err + } + return nil +} + func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[string]interface{}) ([]es.HitT, error) { query, err := tmpl.Render(params) if err != nil { @@ -335,7 +428,7 @@ func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[ if err != nil { if errors.Is(err, es.ErrIndexNotFound) { - m.log.Debug().Str("index", m.index).Msg(es.ErrIndexNotFound.Error()) + m.log.Debug().Msg(es.ErrIndexNotFound.Error()) return nil, nil } return nil, err diff --git a/internal/pkg/monitor/monitor_test.go b/internal/pkg/monitor/monitor_test.go new file mode 100644 index 000000000..257b9296f --- /dev/null +++ b/internal/pkg/monitor/monitor_test.go @@ -0,0 +1,88 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build !integration + +package monitor + +import ( + "testing" + + "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + "github.com/google/go-cmp/cmp" +) + +// Sanity test of interal check if hits lits has holes +func TestHashHoles(t *testing.T) { + + tests := []struct { + Name string + SeqNo sqn.SeqNo + Hits []es.HitT + HasHoles bool + }{ + { + Name: "nil", + Hits: genHitsSequence(nil), + }, + { + Name: "empty", + Hits: genHitsSequence([]int64{}), + }, + { + Name: "one", + SeqNo: sqn.SeqNo([]int64{2}), + Hits: genHitsSequence([]int64{3}), + }, + { + Name: "two", + Hits: genHitsSequence([]int64{2, 3}), + }, + { + Name: "two with hole", + Hits: genHitsSequence([]int64{2, 4}), + HasHoles: true, + }, + { + Name: "holed", + Hits: genHitsSequence([]int64{2, 3, 4, 6}), + HasHoles: true, + }, + { + Name: "hole in the beginning", + SeqNo: sqn.SeqNo([]int64{1}), + Hits: genHitsSequence([]int64{3, 4, 5}), + HasHoles: true, + }, + { + Name: "four no holes", + SeqNo: sqn.SeqNo([]int64{1}), + Hits: genHitsSequence([]int64{2, 3, 4, 5}), + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + diff := cmp.Diff(tc.HasHoles, hasHoles(tc.SeqNo, tc.Hits)) + if diff != "" { + t.Fatal(diff) + } + }) + } +} + +func genHitsSequence(seq []int64) []es.HitT { + if seq == nil { + return nil + } + + hits := make([]es.HitT, 0, len(seq)) + for _, s := range seq { + hits = append(hits, es.HitT{ + SeqNo: s, + }) + } + return hits +} From d0f52ee0b3eeea1b59cc2ed767d342a5b24e84b2 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 31 Aug 2021 21:05:35 +0000 Subject: [PATCH 177/240] Fix panic. (#683) (#684) (cherry picked from commit c859cf06f7d18d8d798cec4489347059bc762587) Co-authored-by: Blake Rouse --- cmd/fleet/handleCheckin.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 2ef6fc1f9..768cafe0f 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -158,7 +158,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r } var newVer string - if ver != agent.Agent.Version { + if agent.Agent == nil || ver != agent.Agent.Version { newVer = ver } From 61ac1f70682b8caaffd1ed7e87cdff8cb1bfe72d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 1 Sep 2021 01:18:50 -0400 Subject: [PATCH 178/240] [Automation] Update elastic stack version to 7.16.0-8a18fe34 for testing (#687) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index c9a4dbe80..a867d2f4e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-978c8c33-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-8a18fe34-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From e956b565eee51deec94737df53737f98242e9b27 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 1 Sep 2021 13:18:28 +0000 Subject: [PATCH 179/240] Encapsulate the agent new version safe check, full unit test coverage (#688) (#689) * Encapsulate the agent new version safe check, full unit test coverage * Simplify GetNewVersion * Rename GetNewVersion to 
CheckDifferentVersion (cherry picked from commit cf7d4b67e005fcdff917dae7249f1d0d18405361) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 6 +-- internal/pkg/model/ext.go | 13 +++++ internal/pkg/model/ext_test.go | 86 ++++++++++++++++++++++++++++++++++ 3 files changed, 101 insertions(+), 4 deletions(-) create mode 100644 internal/pkg/model/ext_test.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 768cafe0f..1bf06f8f9 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -157,10 +157,8 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r return err } - var newVer string - if agent.Agent == nil || ver != agent.Agent.Version { - newVer = ver - } + // Safely check if the agent version is different, return empty string otherwise + newVer := agent.CheckDifferentVersion(ver) // Metrics; serenity now. dfunc := cntCheckin.IncStart() diff --git a/internal/pkg/model/ext.go b/internal/pkg/model/ext.go index 4d6a4bf84..d89787855 100644 --- a/internal/pkg/model/ext.go +++ b/internal/pkg/model/ext.go @@ -25,3 +25,16 @@ func (m *Server) Time() (time.Time, error) { func (m *Server) SetTime(t time.Time) { m.Timestamp = t.Format(time.RFC3339Nano) } + +// CheckDifferentVersion returns Agent version if it is different from ver, otherwise return empty string +func (m *Agent) CheckDifferentVersion(ver string) string { + if m == nil { + return "" + } + + if m.Agent == nil || ver != m.Agent.Version { + return ver + } + + return "" +} diff --git a/internal/pkg/model/ext_test.go b/internal/pkg/model/ext_test.go new file mode 100644 index 000000000..efea34521 --- /dev/null +++ b/internal/pkg/model/ext_test.go @@ -0,0 +1,86 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build !integration + +package model + +import ( + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestAgentGetNewVersion(t *testing.T) { + tests := []struct { + Name string + Agent *Agent + Ver string + WantVer string + }{ + { + Name: "nil", + }, + { + Name: "agent no meta empty version", + Agent: &Agent{}, + }, + { + Name: "agent no meta nonempty version", + Agent: &Agent{}, + Ver: "7.14", + WantVer: "7.14", + }, + { + Name: "agent with meta empty new version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "", + WantVer: "", + }, + { + Name: "agent with meta empty version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "", + }, + }, + Ver: "7.15", + WantVer: "7.15", + }, + { + Name: "agent with meta non empty version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "7.14", + WantVer: "", + }, + { + Name: "agent with meta new version", + Agent: &Agent{ + Agent: &AgentMetadata{ + Version: "7.14", + }, + }, + Ver: "7.15", + WantVer: "7.15", + }, + } + + for _, tc := range tests { + t.Run(tc.Name, func(t *testing.T) { + newVer := tc.Agent.CheckDifferentVersion(tc.Ver) + diff := cmp.Diff(tc.WantVer, newVer) + if diff != "" { + t.Error(diff) + } + }) + } +} From c4749f2ce53f0c2ef63efeea75c02dbb357dd619 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 1 Sep 2021 13:25:58 +0000 Subject: [PATCH 180/240] Set User-Agent in HTTP requests (#654) (#690) * Set User-Agent in HTTP requests * Adjust integration test (cherry picked from commit 96f184e59cc2fabcab646d4994c5a56c245f6e72) Co-authored-by: Aleksandr Maus --- .gitignore | 2 +- Makefile | 3 +- NOTICE.txt | 4 +-- cmd/fleet/main.go | 48 +++++++++++++++------------- cmd/fleet/metrics.go | 2 +- cmd/fleet/server_integration_test.go | 3 +- go.mod | 2 +- go.sum | 4 +-- internal/pkg/build/build.go | 20 ++++++++++++ internal/pkg/es/client.go | 30 ++++++++++++++++- main.go | 12 +++++-- 11 files changed, 94 insertions(+), 36 deletions(-) create mode 100644 internal/pkg/build/build.go diff --git a/.gitignore b/.gitignore index 2f491dea8..c159582f8 100644 --- a/.gitignore +++ b/.gitignore @@ -4,7 +4,7 @@ .vscode/ bin/ -build/ +/build/ fleet-server fleet_server diff --git a/Makefile b/Makefile index 4608aa41e..9a51fa286 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,8 @@ endif PLATFORM_TARGETS=$(addprefix release-, $(PLATFORMS)) COMMIT=$(shell git rev-parse --short HEAD) -LDFLAGS=-w -s -X main.Version=${VERSION} -X main.Commit=${COMMIT} +NOW=$(shell date -u '+%Y-%m-%dT%H:%M:%SZ') +LDFLAGS=-w -s -X main.Version=${VERSION} -X main.Commit=${COMMIT} -X main.BuildTime=$(NOW) CMD_COLOR_ON=\033[32m\xE2\x9c\x93 CMD_COLOR_OFF=\033[0m diff --git a/NOTICE.txt b/NOTICE.txt index 5e0a378ef..a1407b24c 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -483,11 +483,11 @@ SOFTWARE -------------------------------------------------------------------------------- Dependency : github.com/elastic/go-elasticsearch/v7 -Version: v7.13.1 +Version: v7.5.1-0.20210823155509-845c8efe54a7 Licence type (autodetected): Apache-2.0 -------------------------------------------------------------------------------- -Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v7@v7.13.1/LICENSE: +Contents of probable licence file $GOMODCACHE/github.com/elastic/go-elasticsearch/v7@v7.5.1-0.20210823155509-845c8efe54a7/LICENSE: Apache License Version 2.0, January 2004 diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 
c965d842b..558ea63bd 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -18,6 +18,7 @@ import ( "github.com/elastic/go-ucfg/yaml" "github.com/elastic/fleet-server/v7/internal/pkg/action" + "github.com/elastic/fleet-server/v7/internal/pkg/build" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/checkin" @@ -49,6 +50,8 @@ const ( kServiceName = "fleet-server" kAgentMode = "agent-mode" kAgentModeRestartLoopDelay = 2 * time.Second + + kUAFleetServer = "Fleet-Server" ) func installSignalHandler() context.Context { @@ -95,7 +98,7 @@ func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, err return l, err } -func getRunCommand(version, commit string) func(cmd *cobra.Command, args []string) error { +func getRunCommand(bi build.Info) func(cmd *cobra.Command, args []string) error { return func(cmd *cobra.Command, args []string) error { cfgObject := cmd.Flags().Lookup("E").Value.(*config.Flag) cliCfg := cfgObject.Config() @@ -112,12 +115,12 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin if err != nil { return err } - l, err = initLogger(cfg, version, commit) + l, err = initLogger(cfg, bi.Version, bi.Commit) if err != nil { return err } - agent, err := NewAgentMode(cliCfg, os.Stdin, version, l) + agent, err := NewAgentMode(cliCfg, os.Stdin, bi, l) if err != nil { return err } @@ -141,12 +144,12 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin return err } - l, err = initLogger(cfg, version, commit) + l, err = initLogger(cfg, bi.Version, bi.Commit) if err != nil { return err } - srv, err := NewFleetServer(cfg, version, status.NewLog()) + srv, err := NewFleetServer(cfg, bi, status.NewLog()) if err != nil { return err } @@ -164,11 +167,11 @@ func getRunCommand(version, commit string) func(cmd *cobra.Command, args []strin } } -func NewCommand(version, commit string) *cobra.Command { +func NewCommand(bi build.Info) *cobra.Command { cmd := &cobra.Command{ Use: kServiceName, Short: "Fleet Server controls a fleet of Elastic Agents", - RunE: getRunCommand(version, commit), + RunE: getRunCommand(bi), } cmd.Flags().StringP("config", "c", "fleet-server.yml", "Configuration for Fleet Server") cmd.Flags().Bool(kAgentMode, false, "Running under execution of the Elastic Agent") @@ -182,9 +185,8 @@ type firstCfg struct { } type AgentMode struct { - cliCfg *ucfg.Config - version string - + cliCfg *ucfg.Config + bi build.Info reloadables []reload.Reloadable agent client.Client @@ -197,12 +199,12 @@ type AgentMode struct { startChan chan struct{} } -func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, version string, reloadables ...reload.Reloadable) (*AgentMode, error) { +func NewAgentMode(cliCfg *ucfg.Config, reader io.Reader, bi build.Info, reloadables ...reload.Reloadable) (*AgentMode, error) { var err error a := &AgentMode{ cliCfg: cliCfg, - version: version, + bi: bi, reloadables: reloadables, } a.agent, err = client.NewFromReader(reader, a) @@ -247,7 +249,7 @@ func (a *AgentMode) Run(ctx context.Context) error { srvCtx, srvCancel := context.WithCancel(ctx) defer srvCancel() log.Info().Msg("received initial configuration starting Fleet Server") - srv, err := NewFleetServer(cfg.cfg, a.version, status.NewChained(status.NewLog(), a.agent)) + srv, err := NewFleetServer(cfg.cfg, a.bi, status.NewChained(status.NewLog(), a.agent)) if err != nil { // unblock startChan even though there was an 
error a.startChan <- struct{}{} @@ -384,7 +386,7 @@ func (a *AgentMode) OnError(err error) { } type FleetServer struct { - ver string + bi build.Info verCon version.Constraints policyId string @@ -395,8 +397,8 @@ type FleetServer struct { } // NewFleetServer creates the actual fleet server service. -func NewFleetServer(cfg *config.Config, verStr string, reporter status.Reporter) (*FleetServer, error) { - verCon, err := buildVersionConstraint(verStr) +func NewFleetServer(cfg *config.Config, bi build.Info, reporter status.Reporter) (*FleetServer, error) { + verCon, err := buildVersionConstraint(bi.Version) if err != nil { return nil, err } @@ -407,7 +409,7 @@ func NewFleetServer(cfg *config.Config, verStr string, reporter status.Reporter) } return &FleetServer{ - ver: verStr, + bi: bi, verCon: verCon, cfg: cfg, cfgCh: make(chan *config.Config, 1), @@ -646,8 +648,8 @@ func initRuntime(cfg *config.Config) { } } -func initBulker(ctx context.Context, cfg *config.Config) (*bulk.Bulker, error) { - es, err := es.NewClient(ctx, cfg, false) +func (f *FleetServer) initBulker(ctx context.Context, cfg *config.Config) (*bulk.Bulker, error) { + es, err := es.NewClient(ctx, cfg, false, es.WithUserAgent(kUAFleetServer, f.bi)) if err != nil { return nil, err } @@ -675,7 +677,7 @@ func (f *FleetServer) runServer(ctx context.Context, cfg *config.Config) (err er defer bulkCancel() // Create the bulker subsystem - bulker, err := initBulker(bulkCtx, cfg) + bulker, err := f.initBulker(bulkCtx, cfg) if err != nil { return err } @@ -723,13 +725,13 @@ func (f *FleetServer) runSubsystems(ctx context.Context, cfg *config.Config, g * esCli := bulker.Client() // Check version compatibility with Elasticsearch - err = ver.CheckCompatibility(ctx, esCli, f.ver) + err = ver.CheckCompatibility(ctx, esCli, f.bi.Version) if err != nil { return fmt.Errorf("failed version compatibility check with elasticsearch: %w", err) } // Monitoring es client, longer timeout, no retries - monCli, err := es.NewClient(ctx, cfg, true) + monCli, err := es.NewClient(ctx, cfg, true, es.WithUserAgent(kUAFleetServer, f.bi)) if err != nil { return err } @@ -744,7 +746,7 @@ func (f *FleetServer) runSubsystems(ctx context.Context, cfg *config.Config, g * } g.Go(loggedRunFunc(ctx, "Policy index monitor", pim.Run)) - cord := coordinator.NewMonitor(cfg.Fleet, f.ver, bulker, pim, coordinator.NewCoordinatorZero) + cord := coordinator.NewMonitor(cfg.Fleet, f.bi.Version, bulker, pim, coordinator.NewCoordinatorZero) g.Go(loggedRunFunc(ctx, "Coordinator policy monitor", cord.Run)) // Policy monitor diff --git a/cmd/fleet/metrics.go b/cmd/fleet/metrics.go index 43885a678..9b7cff0d7 100644 --- a/cmd/fleet/metrics.go +++ b/cmd/fleet/metrics.go @@ -35,7 +35,7 @@ var ( func (f *FleetServer) initMetrics(ctx context.Context, cfg *config.Config) (*api.Server, error) { registry := monitoring.GetNamespace("info").GetRegistry() if registry.Get("version") == nil { - monitoring.NewString(registry, "version").Set(f.ver) + monitoring.NewString(registry, "version").Set(f.bi.Version) } if registry.Get("name") == nil { monitoring.NewString(registry, "name").Set(kServiceName) diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index 6294e351a..c0d846fb4 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -24,6 +24,7 @@ import ( "github.com/stretchr/testify/require" "golang.org/x/sync/errgroup" + "github.com/elastic/fleet-server/v7/internal/pkg/build" 
"github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" @@ -76,7 +77,7 @@ func startTestServer(ctx context.Context) (*tserver, error) { cfg.Inputs[0].Server = *srvcfg log.Info().Uint16("port", port).Msg("Test fleet server") - srv, err := NewFleetServer(cfg, serverVersion, status.NewLog()) + srv, err := NewFleetServer(cfg, build.Info{Version: serverVersion}, status.NewLog()) if err != nil { return nil, err } diff --git a/go.mod b/go.mod index 33d2f19f2..5f2ebf3d9 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ require ( github.com/dgraph-io/ristretto v0.1.0 github.com/elastic/beats/v7 v7.11.1 github.com/elastic/elastic-agent-client/v7 v7.0.0-20210727140539-f0905d9377f6 - github.com/elastic/go-elasticsearch/v7 v7.13.1 + github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7 github.com/elastic/go-ucfg v0.8.3 github.com/gofrs/uuid v3.3.0+incompatible github.com/google/go-cmp v0.4.0 diff --git a/go.sum b/go.sum index 74b0ce1a1..1b6ac7f52 100644 --- a/go.sum +++ b/go.sum @@ -253,8 +253,8 @@ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270 h1:cWPqxlPtir4RoQ github.com/elastic/fsevents v0.0.0-20181029231046-e1d381a4d270/go.mod h1:Msl1pdboCbArMF/nSCDUXgQuWTeoMmE/z8607X+k7ng= github.com/elastic/go-concert v0.0.4 h1:pzgYCmJ/xMJsW8PSk33inAWZ065hrwSeP79TpwAbsLE= github.com/elastic/go-concert v0.0.4/go.mod h1:9MtFarjXroUgmm0m6HY3NSe1XiKhdktiNRRj9hWvIaM= -github.com/elastic/go-elasticsearch/v7 v7.13.1 h1:PaM3V69wPlnwR+ne50rSKKn0RNDYnnOFQcuGEI0ce80= -github.com/elastic/go-elasticsearch/v7 v7.13.1/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= +github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7 h1:Nq382VeELkUSC7y8JIXBNj0YfOqmq/d8mX+crl4xdrM= +github.com/elastic/go-elasticsearch/v7 v7.5.1-0.20210823155509-845c8efe54a7/go.mod h1:OJ4wdbtDNk5g503kvlHLyErCgQwwzmDtaFC4XyOxXA4= github.com/elastic/go-libaudit/v2 v2.1.0 h1:yWSKoGaoWLGFPjqWrQ4gwtuM77pTk7K4CsPxXss8he4= github.com/elastic/go-libaudit/v2 v2.1.0/go.mod h1:MM/l/4xV7ilcl+cIblL8Zn448J7RZaDwgNLE4gNKYPg= github.com/elastic/go-licenser v0.3.1 h1:RmRukU/JUmts+rpexAw0Fvt2ly7VVu6mw8z4HrEzObU= diff --git a/internal/pkg/build/build.go b/internal/pkg/build/build.go new file mode 100644 index 000000000..06c168dc0 --- /dev/null +++ b/internal/pkg/build/build.go @@ -0,0 +1,20 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package build + +import "time" + +type Info struct { + Version, Commit string + BuildTime time.Time +} + +func Time(stime string) time.Time { + t, err := time.Parse(time.RFC3339, stime) + if err != nil { + return time.Time{} + } + return t +} diff --git a/internal/pkg/es/client.go b/internal/pkg/es/client.go index b3c6423b6..c5ca8f129 100644 --- a/internal/pkg/es/client.go +++ b/internal/pkg/es/client.go @@ -8,14 +8,19 @@ import ( "context" "encoding/json" "fmt" + "net/http" + "runtime" + "github.com/elastic/fleet-server/v7/internal/pkg/build" "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/go-elasticsearch/v7" "github.com/rs/zerolog/log" ) -func NewClient(ctx context.Context, cfg *config.Config, longPoll bool) (*elasticsearch.Client, error) { +type ConfigOption func(config elasticsearch.Config) + +func NewClient(ctx context.Context, cfg *config.Config, longPoll bool, opts ...ConfigOption) (*elasticsearch.Client, error) { escfg, err := cfg.Output.Elasticsearch.ToESConfig(longPoll) if err != nil { return nil, err @@ -24,6 +29,11 @@ func NewClient(ctx context.Context, cfg *config.Config, longPoll bool) (*elastic user := cfg.Output.Elasticsearch.Username mcph := cfg.Output.Elasticsearch.MaxConnPerHost + // Apply configuration options + for _, opt := range opts { + opt(escfg) + } + log.Debug(). Strs("addr", addr). Str("user", user). @@ -50,6 +60,24 @@ func NewClient(ctx context.Context, cfg *config.Config, longPoll bool) (*elastic return es, nil } +func WithUserAgent(name string, bi build.Info) func(config elasticsearch.Config) { + return func(config elasticsearch.Config) { + ua := userAgent(name, bi) + // Set User-Agent header + if config.Header == nil { + config.Header = http.Header{} + } + config.Header.Set("User-Agent", ua) + } +} + +func userAgent(name string, bi build.Info) string { + return fmt.Sprintf("Elastic-%s/%s (%s; %s; %s; %s)", + name, + bi.Version, runtime.GOOS, runtime.GOARCH, + bi.Commit, bi.BuildTime) +} + type InfoResponse struct { ClusterName string `json:"cluster_name"` ClusterUUID string `json:"cluster_uuid"` diff --git a/main.go b/main.go index bf9b0e0b0..27330d702 100644 --- a/main.go +++ b/main.go @@ -14,17 +14,23 @@ import ( "os" "github.com/elastic/fleet-server/v7/cmd/fleet" + "github.com/elastic/fleet-server/v7/internal/pkg/build" ) const defaultVersion = "7.16.0" var ( - Version string = defaultVersion - Commit string + Version string = defaultVersion + Commit string + BuildTime string ) func main() { - cmd := fleet.NewCommand(Version, Commit) + cmd := fleet.NewCommand(build.Info{ + Version: Version, + Commit: Commit, + BuildTime: build.Time(BuildTime), + }) if err := cmd.Execute(); err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) From 12118114cccccd366d82da00334358b81e951401 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 7 Sep 2021 18:07:25 +0000 Subject: [PATCH 181/240] Fix issues with config reloading (#692) (#693) * Fix issues with config reloading. * Use SetupES. * Rename to Cacher. 
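
For reference, a minimal sketch of the Cacher abstraction this change introduces (illustrative only; it assumes nothing beyond the interface added in internal/pkg/cache/impl.go, and the no-op type below is a stand-in for the integration-build implementation):

    package main

    import (
        "fmt"
        "time"
    )

    // Cacher mirrors the interface extracted in internal/pkg/cache/impl.go.
    type Cacher interface {
        Get(key interface{}) (interface{}, bool)
        Set(key, value interface{}, cost int64) bool
        SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool
        Close()
    }

    // noCache never stores anything, mimicking the integration-test build.
    type noCache struct{}

    func (noCache) Get(_ interface{}) (interface{}, bool)                      { return nil, false }
    func (noCache) Set(_, _ interface{}, _ int64) bool                         { return true }
    func (noCache) SetWithTTL(_, _ interface{}, _ int64, _ time.Duration) bool { return true }
    func (noCache) Close()                                                     {}

    func main() {
        var c Cacher = noCache{}
        c.Set("k", "v", 1)
        if _, ok := c.Get("k"); !ok {
            fmt.Println("cache miss (expected: the no-op cache never stores entries)")
        }
    }

The production build keeps the ristretto-backed implementation; the build tags only select which newCache constructor is compiled in.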
(cherry picked from commit 983c2748b588fc44d34468558911e3ce360e76dc) Co-authored-by: Blake Rouse --- cmd/fleet/main.go | 37 ++++ cmd/fleet/main_integration_test.go | 232 +++++++++++++++++++++++++ internal/pkg/cache/cache.go | 13 +- internal/pkg/cache/impl.go | 16 ++ internal/pkg/cache/impl_integration.go | 32 ++++ internal/pkg/cache/impl_ristretto.go | 21 +++ internal/pkg/dl/enrollment_api_key.go | 11 ++ internal/pkg/testing/esutil/index.go | 28 +++ internal/pkg/testing/suite/suite.go | 56 ++++++ 9 files changed, 434 insertions(+), 12 deletions(-) create mode 100644 cmd/fleet/main_integration_test.go create mode 100644 internal/pkg/cache/impl.go create mode 100644 internal/pkg/cache/impl_integration.go create mode 100644 internal/pkg/cache/impl_ristretto.go create mode 100644 internal/pkg/testing/suite/suite.go diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 558ea63bd..ebb33cd68 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -511,6 +511,7 @@ LOOP: } curCfg = newCfg + f.cfg = curCfg select { case newCfg = <-f.cfgCh: @@ -553,6 +554,34 @@ func configChangedProfiler(curCfg, newCfg *config.Config) bool { return changed } +func redactOutputCfg(cfg *config.Config) config.Output { + const kRedacted = "[redacted]" + redacted := cfg.Output + + if redacted.Elasticsearch.Password != "" { + redacted.Elasticsearch.Password = kRedacted + } + + if redacted.Elasticsearch.APIKey != "" { + redacted.Elasticsearch.APIKey = kRedacted + } + + if redacted.Elasticsearch.TLS != nil { + newTLS := *redacted.Elasticsearch.TLS + + if newTLS.Certificate.Key != "" { + newTLS.Certificate.Key = kRedacted + } + if newTLS.Certificate.Passphrase != "" { + newTLS.Certificate.Passphrase = kRedacted + } + + redacted.Elasticsearch.TLS = &newTLS + } + + return redacted +} + func redactServerCfg(cfg *config.Config) config.Server { const kRedacted = "[redacted]" redacted := cfg.Inputs[0].Server @@ -581,6 +610,14 @@ func configChangedServer(curCfg, newCfg *config.Config) bool { switch { case curCfg == nil: zlog.Info().Msg("initial server configuration") + case !reflect.DeepEqual(curCfg.Fleet, newCfg.Fleet): + zlog.Info(). + Interface("old", curCfg). + Msg("fleet configuration has changed") + case !reflect.DeepEqual(curCfg.Output, newCfg.Output): + zlog.Info(). + Interface("old", redactOutputCfg(curCfg)). + Msg("output configuration has changed") case !reflect.DeepEqual(curCfg.Inputs[0].Server, newCfg.Inputs[0].Server): zlog.Info(). Interface("old", redactServerCfg(curCfg)). diff --git a/cmd/fleet/main_integration_test.go b/cmd/fleet/main_integration_test.go new file mode 100644 index 000000000..e3d7a4bbd --- /dev/null +++ b/cmd/fleet/main_integration_test.go @@ -0,0 +1,232 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +// +build integration + +package fleet + +import ( + "context" + "fmt" + "io" + "sync" + "testing" + "time" + + "github.com/elastic/beats/v7/libbeat/logp" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/logger" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/core/server" + "github.com/elastic/elastic-agent-client/v7/pkg/proto" + "github.com/elastic/go-ucfg" + "github.com/gofrs/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/elastic/fleet-server/v7/internal/pkg/build" + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/model" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/suite" +) + +var biInfo = build.Info{ + Version: "1.0.0", + Commit: "integration", +} + +var policyData = []byte(` +{ + "inputs": [ + { + "type": "fleet-server" + } + ] +} +`) + +var initialCfgData = ` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +` + +var agentIdCfgData = ` +output: + elasticsearch: + hosts: '${ELASTICSEARCH_HOSTS:localhost:9200}' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +` + +var badCfgData = ` +output: + elasticsearch: + hosts: 'localhost:63542' + username: '${ELASTICSEARCH_USERNAME:elastic}' + password: '${ELASTICSEARCH_PASSWORD:changeme}' +fleet: + agent: + id: 1e4954ce-af37-4731-9f4a-407b08e69e42 +` + +type agentSuite struct { + suite.RunningSuite +} + +func (s *agentSuite) TestAgentMode(t *testing.T) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + + bulker := ftesting.SetupBulk(ctx, t) + + // add a real default fleet server policy + policyId := uuid.Must(uuid.NewV4()).String() + _, err := dl.CreatePolicy(ctx, bulker, model.Policy{ + PolicyId: policyId, + RevisionIdx: 1, + DefaultFleetServer: true, + Data: policyData, + }) + require.NoError(t, err) + + // add entry for enrollment key (doesn't have to be a real key) + _, err = dl.CreateEnrollmentAPIKey(ctx, bulker, model.EnrollmentApiKey{ + Name: "Default", + ApiKey: "keyvalue", + ApiKeyId: "keyid", + PolicyId: policyId, + Active: true, + }) + require.NoError(t, err) + + app := &StubApp{} + control := createAndStartControlServer(t, app) + defer control.Stop() + appState, err := control.Register(app, initialCfgData) + require.NoError(t, err) + + r, w := io.Pipe() + + var wg sync.WaitGroup + wg.Add(1) + go func() { + defer wg.Done() + agent, err := NewAgentMode(ucfg.New(), r, biInfo) + require.NoError(t, err) + err = agent.Run(ctx) + assert.NoError(t, err) + }() + + err = appState.WriteConnInfo(w) + require.NoError(t, err) + + // wait for fleet-server to report as degraded (starting mode without agent.id) + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_DEGRADED { + return fmt.Errorf("should be reported as degraded; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // reconfigure with agent ID set + err = appState.UpdateConfig(agentIdCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + 
if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // trigger update with bad configuration + err = appState.UpdateConfig(badCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as failed + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_FAILED { + return fmt.Errorf("should be reported as failed; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // reconfigure to good config + err = appState.UpdateConfig(agentIdCfgData) + require.NoError(t, err) + + // wait for fleet-server to report as healthy + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status := app.Status() + if status != proto.StateObserved_HEALTHY { + return fmt.Errorf("should be reported as healthy; instead its %s", status) + } + return nil + }, ftesting.RetrySleep(100*time.Millisecond), ftesting.RetryCount(120)) + + // trigger stop + err = appState.Stop(10 * time.Second) + assert.NoError(t, err) + + // wait for go routine to exit + wg.Wait() +} + +func newDebugLogger(t *testing.T) *logger.Logger { + t.Helper() + + loggerCfg := logger.DefaultLoggingConfig() + loggerCfg.Level = logp.DebugLevel + + log, err := logger.NewFromConfig("", loggerCfg) + require.NoError(t, err) + return log +} + +func createAndStartControlServer(t *testing.T, handler server.Handler, extraConfigs ...func(*server.Server)) *server.Server { + t.Helper() + srv, err := server.New(newDebugLogger(t), "localhost:0", handler) + require.NoError(t, err) + for _, extra := range extraConfigs { + extra(srv) + } + require.NoError(t, srv.Start()) + return srv +} + +type StubApp struct { + lock sync.RWMutex + status proto.StateObserved_Status + message string + payload map[string]interface{} +} + +func (a *StubApp) Status() proto.StateObserved_Status { + a.lock.RLock() + defer a.lock.RUnlock() + return a.status +} + +func (a *StubApp) Message() string { + a.lock.RLock() + defer a.lock.RUnlock() + return a.message +} + +func (a *StubApp) OnStatusChange(_ *server.ApplicationState, status proto.StateObserved_Status, message string, payload map[string]interface{}) { + a.lock.Lock() + defer a.lock.Unlock() + a.status = status + a.message = message + a.payload = payload +} diff --git a/internal/pkg/cache/cache.go b/internal/pkg/cache/cache.go index 10a1bd10f..52e676ab8 100644 --- a/internal/pkg/cache/cache.go +++ b/internal/pkg/cache/cache.go @@ -10,7 +10,6 @@ import ( "sync" "time" - "github.com/dgraph-io/ristretto" "github.com/rs/zerolog" "github.com/rs/zerolog/log" @@ -38,7 +37,7 @@ type ApiKey = apikey.ApiKey type SecurityInfo = apikey.SecurityInfo type CacheT struct { - cache *ristretto.Cache + cache Cacher cfg Config mut sync.RWMutex } @@ -83,16 +82,6 @@ func New(cfg Config) (*CacheT, error) { return &c, nil } -func newCache(cfg Config) (*ristretto.Cache, error) { - rcfg := &ristretto.Config{ - NumCounters: cfg.NumCounters, - MaxCost: cfg.MaxCost, - BufferItems: 64, - } - - return ristretto.NewCache(rcfg) -} - // Reconfigure will drop cache func (c *CacheT) Reconfigure(cfg Config) error { c.mut.Lock() diff --git a/internal/pkg/cache/impl.go b/internal/pkg/cache/impl.go new file mode 100644 index 000000000..ef7a70d8a --- /dev/null +++ b/internal/pkg/cache/impl.go @@ -0,0 +1,16 @@ +// Copyright Elasticsearch B.V. 
and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package cache + +import ( + "time" +) + +type Cacher interface { + Get(key interface{}) (interface{}, bool) + Set(key, value interface{}, cost int64) bool + SetWithTTL(key, value interface{}, cost int64, ttl time.Duration) bool + Close() +} diff --git a/internal/pkg/cache/impl_integration.go b/internal/pkg/cache/impl_integration.go new file mode 100644 index 000000000..e7e87b3d5 --- /dev/null +++ b/internal/pkg/cache/impl_integration.go @@ -0,0 +1,32 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build integration + +package cache + +import ( + "time" +) + +func newCache(_ Config) (Cacher, error) { + return &NoCache{}, nil +} + +type NoCache struct{} + +func (c *NoCache) Get(_ interface{}) (interface{}, bool) { + return nil, false +} + +func (c *NoCache) Set(_ interface{}, _ interface{}, _ int64) bool { + return true +} + +func (c *NoCache) SetWithTTL(_, _ interface{}, _ int64, _ time.Duration) bool { + return true +} + +func (c *NoCache) Close() { +} diff --git a/internal/pkg/cache/impl_ristretto.go b/internal/pkg/cache/impl_ristretto.go new file mode 100644 index 000000000..877e3023a --- /dev/null +++ b/internal/pkg/cache/impl_ristretto.go @@ -0,0 +1,21 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build !integration + +package cache + +import ( + "github.com/dgraph-io/ristretto" +) + +func newCache(cfg Config) (Cacher, error) { + rcfg := &ristretto.Config{ + NumCounters: cfg.NumCounters, + MaxCost: cfg.MaxCost, + BufferItems: 64, + } + + return ristretto.NewCache(rcfg) +} diff --git a/internal/pkg/dl/enrollment_api_key.go b/internal/pkg/dl/enrollment_api_key.go index 5e268f239..176c51d1d 100644 --- a/internal/pkg/dl/enrollment_api_key.go +++ b/internal/pkg/dl/enrollment_api_key.go @@ -6,6 +6,7 @@ package dl import ( "context" + "encoding/json" "fmt" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" @@ -79,3 +80,13 @@ func findEnrollmentAPIKeys(ctx context.Context, bulker bulk.Bulk, index string, } return recs, nil } + +// CreateEnrollmentAPIKey creates a new enrollment API key +func CreateEnrollmentAPIKey(ctx context.Context, bulker bulk.Bulk, key model.EnrollmentApiKey, opt ...Option) (string, error) { + o := newOption(FleetEnrollmentAPIKeys, opt...) 
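+	// Marshal the key document and index it with a refresh so the new
+	// enrollment key is searchable immediately after creation.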
+ data, err := json.Marshal(&key) + if err != nil { + return "", err + } + return bulker.Create(ctx, o.indexName, "", data, bulk.WithRefresh()) +} diff --git a/internal/pkg/testing/esutil/index.go b/internal/pkg/testing/esutil/index.go index 2e1d9304d..1cb7eff53 100644 --- a/internal/pkg/testing/esutil/index.go +++ b/internal/pkg/testing/esutil/index.go @@ -45,3 +45,31 @@ func CreateIndex(ctx context.Context, cli *elasticsearch.Client, name string) er return nil } + +func DeleteIndices(ctx context.Context, cli *elasticsearch.Client, names ...string) error { + res, err := cli.Indices.Delete(names, + cli.Indices.Delete.WithContext(ctx), + ) + + if err != nil { + return err + } + + defer res.Body.Close() + + err = checkResponseError(res) + if err != nil { + return err + } + + var r AckResponse + err = json.NewDecoder(res.Body).Decode(&r) + if err != nil { + return fmt.Errorf("failed to parse delete indices response: %v, err: %v", names, err) + } + if !r.Acknowledged { + return fmt.Errorf("failed to receive acknowledgment for delete indices request: %v", names) + } + + return nil +} diff --git a/internal/pkg/testing/suite/suite.go b/internal/pkg/testing/suite/suite.go new file mode 100644 index 000000000..5ddd787a6 --- /dev/null +++ b/internal/pkg/testing/suite/suite.go @@ -0,0 +1,56 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// +build integration + +package suite + +import ( + "context" + + "github.com/stretchr/testify/require" + tsuite "github.com/stretchr/testify/suite" + + "github.com/elastic/fleet-server/v7/internal/pkg/dl" + "github.com/elastic/fleet-server/v7/internal/pkg/es" + ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" + "github.com/elastic/fleet-server/v7/internal/pkg/testing/esutil" +) + +var prepareIndexes = map[string]string{ + dl.FleetActions: es.MappingAction, + dl.FleetActionsResults: es.MappingActionResult, + dl.FleetAgents: es.MappingAgent, + dl.FleetArtifacts: es.MappingArtifact, + dl.FleetEnrollmentAPIKeys: es.MappingEnrollmentApiKey, + dl.FleetPolicies: es.MappingPolicy, + dl.FleetPoliciesLeader: es.MappingPolicyLeader, + dl.FleetServers: es.MappingServer, +} + +type RunningSuite struct { + tsuite.Suite +} + +func (s *RunningSuite) SetupSuite() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := ftesting.SetupES(ctx, s.T()) + for index, mapping := range prepareIndexes { + err := esutil.EnsureIndex(ctx, c, index, mapping) + require.NoError(s.T(), err) + } +} + +func (s *RunningSuite) TearDownSuite() { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + c := ftesting.SetupES(ctx, s.T()) + names := make([]string, 0, len(prepareIndexes)) + for index, _ := range prepareIndexes { + names = append(names, index) + } + err := esutil.DeleteIndices(ctx, c, names...) 
+ require.NoError(s.T(), err) +} From 8135e2e82466630c7c6b7e244539e0a205e2ad68 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 8 Sep 2021 01:19:32 -0400 Subject: [PATCH 182/240] [Automation] Update elastic stack version to 7.16.0-a96d8aa5 for testing (#697) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index a867d2f4e..93692bd9c 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-8a18fe34-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-a96d8aa5-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 2b390d4852dd8f6a52ec532474108c75a2356a67 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 8 Sep 2021 12:17:51 +0000 Subject: [PATCH 183/240] Add "hole" detection and refresh for the agent actions fetch upon initial check in (#681) (#698) * Remove unused Hits structs from monitor * Add "hole" detection and refresh for the agent actions fetch upon initial check in (cherry picked from commit dcdbd1bba9ce25796249ac6dd4343ac47820e424) Co-authored-by: Aleksandr Maus --- cmd/fleet/handleCheckin.go | 10 +-- internal/pkg/dl/actions.go | 50 ++++++++++- internal/pkg/es/holes.go | 37 ++++++++ .../monitor_test.go => es/holes_test.go} | 13 ++- internal/pkg/es/refresh.go | 45 ++++++++++ internal/pkg/monitor/monitor.go | 84 +------------------ 6 files changed, 138 insertions(+), 101 deletions(-) create mode 100644 internal/pkg/es/holes.go rename internal/pkg/{monitor/monitor_test.go => es/holes_test.go} (84%) create mode 100644 internal/pkg/es/refresh.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 1bf06f8f9..5a5e8703a 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -373,14 +373,8 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent } func (ct *CheckinT) fetchAgentPendingActions(ctx context.Context, seqno sqn.SeqNo, agentId string) ([]model.Action, error) { - now := time.Now().UTC().Format(time.RFC3339) - - actions, err := dl.FindActions(ctx, ct.bulker, dl.QueryAgentActions, map[string]interface{}{ - dl.FieldSeqNo: seqno.Value(), - dl.FieldMaxSeqNo: ct.gcp.GetCheckpoint().Value(), - dl.FieldExpiration: now, - dl.FieldAgents: []string{agentId}, - }) + + actions, err := dl.FindAgentActions(ctx, ct.bulker, seqno, ct.gcp.GetCheckpoint(), agentId) if err != nil { return nil, errors.Wrap(err, "fetchAgentPendingActions") diff --git a/internal/pkg/dl/actions.go b/internal/pkg/dl/actions.go index 2c34dec16..4db876281 100644 --- a/internal/pkg/dl/actions.go +++ b/internal/pkg/dl/actions.go @@ -7,11 +7,13 @@ package dl import ( "context" "errors" + "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dsl" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/model" + "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/rs/zerolog/log" ) @@ -83,7 +85,35 @@ func FindActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, params m return findActions(ctx, bulker, tmpl, FleetActions, params) } -func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) 
([]model.Action, error) { +func FindAgentActions(ctx context.Context, bulker bulk.Bulk, minSeqNo, maxSeqNo sqn.SeqNo, agentId string) ([]model.Action, error) { + const index = FleetActions + params := map[string]interface{}{ + FieldSeqNo: minSeqNo.Value(), + FieldMaxSeqNo: maxSeqNo.Value(), + FieldExpiration: time.Now().UTC().Format(time.RFC3339), + FieldAgents: []string{agentId}, + } + + res, err := findActionsHits(ctx, bulker, QueryAgentActions, index, params) + if err != nil || res == nil { + return nil, err + } + + if es.HasHoles(minSeqNo, res.Hits) { + err = es.Refresh(ctx, bulker.Client(), index) + if err != nil { + log.Error().Err(err).Msg("failed to refresh index") + } + res, err := findActionsHits(ctx, bulker, QueryAgentActions, index, params) + if err != nil || res == nil { + return nil, err + } + } + + return hitsToActions(res.Hits) +} + +func findActionsHits(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) (*es.HitsT, error) { res, err := Search(ctx, bulker, tmpl, index, params) if err != nil { if errors.Is(err, es.ErrIndexNotFound) { @@ -92,10 +122,22 @@ func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index st } return nil, err } + return res, nil +} + +func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index string, params map[string]interface{}) ([]model.Action, error) { + res, err := findActionsHits(ctx, bulker, tmpl, index, params) + if err != nil || res == nil { + return nil, err + } + + return hitsToActions(res.Hits) +} - actions := make([]model.Action, 0, len(res.Hits)) +func hitsToActions(hits []es.HitT) ([]model.Action, error) { + actions := make([]model.Action, 0, len(hits)) - for _, hit := range res.Hits { + for _, hit := range hits { var action model.Action err := hit.Unmarshal(&action) if err != nil { @@ -103,5 +145,5 @@ func findActions(ctx context.Context, bulker bulk.Bulk, tmpl *dsl.Tmpl, index st } actions = append(actions, action) } - return actions, err + return actions, nil } diff --git a/internal/pkg/es/holes.go b/internal/pkg/es/holes.go new file mode 100644 index 000000000..f74df08c2 --- /dev/null +++ b/internal/pkg/es/holes.go @@ -0,0 +1,37 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. 
+ +package es + +import "github.com/elastic/fleet-server/v7/internal/pkg/sqn" + +func HasHoles(checkpoint sqn.SeqNo, hits []HitT) bool { + sz := len(hits) + if sz == 0 { + return false + } + + // Check if the hole is in the beginning of hits + seqNo := checkpoint.Value() + if seqNo != sqn.UndefinedSeqNo && (hits[0].SeqNo-seqNo) > 1 { + return true + } + + // No holes in the beginning, check if size <= 1 then there is no holes + if sz <= 1 { + return false + } + + // Set initial seqNo value from the last hit in the array + seqNo = hits[sz-1].SeqNo + + // Iterate from the end since that's where it more likely to have holes + for i := sz - 2; i >= 0; i-- { + if (seqNo - hits[i].SeqNo) > 1 { + return true + } + seqNo = hits[i].SeqNo + } + return false +} diff --git a/internal/pkg/monitor/monitor_test.go b/internal/pkg/es/holes_test.go similarity index 84% rename from internal/pkg/monitor/monitor_test.go rename to internal/pkg/es/holes_test.go index 257b9296f..79fde9843 100644 --- a/internal/pkg/monitor/monitor_test.go +++ b/internal/pkg/es/holes_test.go @@ -4,12 +4,11 @@ // +build !integration -package monitor +package es import ( "testing" - "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" "github.com/google/go-cmp/cmp" ) @@ -20,7 +19,7 @@ func TestHashHoles(t *testing.T) { tests := []struct { Name string SeqNo sqn.SeqNo - Hits []es.HitT + Hits []HitT HasHoles bool }{ { @@ -65,7 +64,7 @@ func TestHashHoles(t *testing.T) { for _, tc := range tests { t.Run(tc.Name, func(t *testing.T) { - diff := cmp.Diff(tc.HasHoles, hasHoles(tc.SeqNo, tc.Hits)) + diff := cmp.Diff(tc.HasHoles, HasHoles(tc.SeqNo, tc.Hits)) if diff != "" { t.Fatal(diff) } @@ -73,14 +72,14 @@ func TestHashHoles(t *testing.T) { } } -func genHitsSequence(seq []int64) []es.HitT { +func genHitsSequence(seq []int64) []HitT { if seq == nil { return nil } - hits := make([]es.HitT, 0, len(seq)) + hits := make([]HitT, 0, len(seq)) for _, s := range seq { - hits = append(hits, es.HitT{ + hits = append(hits, HitT{ SeqNo: s, }) } diff --git a/internal/pkg/es/refresh.go b/internal/pkg/es/refresh.go new file mode 100644 index 000000000..b138aeb5b --- /dev/null +++ b/internal/pkg/es/refresh.go @@ -0,0 +1,45 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package es + +import ( + "context" + "encoding/json" + "errors" + + "github.com/elastic/go-elasticsearch/v7" +) + +// Refresh refreshes index. This is temporary code +// TODO: Remove this when the refresh is properly implemented on Eleasticsearch side +// The issue for "refresh" falls under phase 2 of https://github.com/elastic/elasticsearch/issues/71449. +// Once the phase 2 is complete we can remove the refreshes from fleet-server. 
+func Refresh(ctx context.Context, esCli *elasticsearch.Client, index string) error { + res, err := esCli.Indices.Refresh( + esCli.Indices.Refresh.WithContext(ctx), + esCli.Indices.Refresh.WithIndex(index), + ) + if err != nil { + return err + } + defer res.Body.Close() + var esres Response + err = json.NewDecoder(res.Body).Decode(&esres) + if err != nil { + return err + } + + if res.IsError() { + err = TranslateError(res.StatusCode, &esres.Error) + } + + if err != nil { + if errors.Is(err, ErrIndexNotFound) { + return nil + } + return err + } + return nil +} diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index cc606297f..f7081bf3e 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -52,23 +52,6 @@ const ( fieldExpiration = "expiration" ) -type HitT struct { - Id string `json:"_id"` - SeqNo int64 `json:"_seq_no"` - Index string `json:"_index"` - Source json.RawMessage `json:"_source"` - Score *float64 `json:"_score"` -} - -type HitsT struct { - Hits []HitT `json:"hits"` - Total struct { - Relation string `json:"relation"` - Value uint64 `json:"value"` - } `json:"total"` - MaxScore *float64 `json:"max_score"` -} - type GlobalCheckpointProvider interface { GetCheckpoint() sqn.SeqNo } @@ -277,9 +260,9 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { } // Check if the list of hits has holes - if hasHoles(checkpoint, hits) { + if es.HasHoles(checkpoint, hits) { m.log.Debug().Msg("hits list has holes, refresh index") - err = m.refresh(ctx) + err = es.Refresh(ctx, m.esCli, m.index) if err != nil { m.log.Error().Err(err).Msg("failed to refresh index") break @@ -304,36 +287,6 @@ func (m *simpleMonitorT) Run(ctx context.Context) (err error) { } } -func hasHoles(checkpoint sqn.SeqNo, hits []es.HitT) bool { - sz := len(hits) - if sz == 0 { - return false - } - - // Check if the hole is in the beginning of hits - seqNo := checkpoint.Value() - if seqNo != sqn.UndefinedSeqNo && (hits[0].SeqNo-seqNo) > 1 { - return true - } - - // No holes in the beginning, check if size <= 1 then there is no holes - if sz <= 1 { - return false - } - - // Set initial seqNo value from the last hit in the array - seqNo = hits[sz-1].SeqNo - - // Iterate from the end since that's where it more likely to have holes - for i := sz - 2; i >= 0; i-- { - if (seqNo - hits[i].SeqNo) > 1 { - return true - } - seqNo = hits[i].SeqNo - } - return false -} - func (m *simpleMonitorT) notify(ctx context.Context, hits []es.HitT) int { sz := len(hits) if sz > 0 { @@ -368,39 +321,6 @@ func (m *simpleMonitorT) fetch(ctx context.Context, checkpoint, maxCheckpoint sq return hits, nil } -// Refreshes index. This is temporary code -// TODO: Remove this when the refresh is properly implemented on Eleasticsearch side -// The issue for "refresh" falls under phase 2 of https://github.com/elastic/elasticsearch/issues/71449. -// Once the phase 2 is complete we can remove the refreshes from fleet-server. 
-func (m *simpleMonitorT) refresh(ctx context.Context) error { - res, err := m.esCli.Indices.Refresh( - m.esCli.Indices.Refresh.WithContext(ctx), - m.esCli.Indices.Refresh.WithIndex(m.index), - ) - if err != nil { - return err - } - defer res.Body.Close() - var esres es.Response - err = json.NewDecoder(res.Body).Decode(&esres) - if err != nil { - return err - } - - if res.IsError() { - err = es.TranslateError(res.StatusCode, &esres.Error) - } - - if err != nil { - if errors.Is(err, es.ErrIndexNotFound) { - m.log.Debug().Msg(es.ErrIndexNotFound.Error()) - return nil - } - return err - } - return nil -} - func (m *simpleMonitorT) search(ctx context.Context, tmpl *dsl.Tmpl, params map[string]interface{}) ([]es.HitT, error) { query, err := tmpl.Render(params) if err != nil { From 9339ae07ccc432cf390a389b75d17cdabd27b69d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 9 Sep 2021 01:20:22 -0400 Subject: [PATCH 184/240] [Automation] Update elastic stack version to 7.16.0-468522bc for testing (#701) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 93692bd9c..264a0a479 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-a96d8aa5-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-468522bc-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From b2933c8296cbf44984ea59e6cb9354f47eb0e52e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 13 Sep 2021 01:22:43 -0400 Subject: [PATCH 185/240] [Automation] Update elastic stack version to 7.16.0-fbd5fab2 for testing (#706) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 264a0a479..3e66f5bc0 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-468522bc-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-fbd5fab2-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 34918a097360a349da1c88013597f0ee08f1c723 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 14 Sep 2021 01:20:57 -0400 Subject: [PATCH 186/240] [Automation] Update elastic stack version to 7.16.0-f4fa0a64 for testing (#708) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3e66f5bc0..157ac2230 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-fbd5fab2-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-f4fa0a64-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 4693359ba5795c9afdb4bbe245a4aa52db1a313d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 15 Sep 2021 01:19:22 -0400 Subject: [PATCH 187/240] [Automation] Update elastic stack version to 7.16.0-b40a7e80 for testing (#711) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 
insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 157ac2230..7fec37a7d 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-f4fa0a64-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-b40a7e80-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From c0342c90934ed2169023650fac9f8d53f0234a4a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 16 Sep 2021 01:19:07 -0400 Subject: [PATCH 188/240] [Automation] Update elastic stack version to 7.16.0-63cdb007 for testing (#716) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 7fec37a7d..49008eaa3 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-b40a7e80-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-63cdb007-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From babca29d384458ef59d8df67cc9ab623600b85d6 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 20 Sep 2021 01:20:24 -0400 Subject: [PATCH 189/240] [Automation] Update elastic stack version to 7.16.0-8df9bb12 for testing (#718) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 49008eaa3..358876a83 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-63cdb007-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-8df9bb12-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 293fc0df7081a5886ad8f4993d49f40d4c17ff2c Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 21 Sep 2021 01:23:14 -0400 Subject: [PATCH 190/240] [Automation] Update elastic stack version to 7.16.0-3f2f8446 for testing (#721) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 358876a83..30477ac73 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-8df9bb12-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-3f2f8446-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 1245f0ce9b23c576b66b223c525e3be1511d52fc Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 22 Sep 2021 01:19:25 -0400 Subject: [PATCH 191/240] [Automation] Update elastic stack version to 7.16.0-e2daa6e3 for testing (#726) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 30477ac73..2c0c658df 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-3f2f8446-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-e2daa6e3-SNAPSHOT ELASTICSEARCH_USERNAME=elastic 
ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From eaebfdae44a90d61edb2c771631b15ed995ba054 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Wed, 22 Sep 2021 11:21:36 +0000 Subject: [PATCH 192/240] [CI] use obltGitHubComments (#728) (#729) (cherry picked from commit a31ecb56bc13fabd52adfc9263a4afffe562acd5) Co-authored-by: Victor Martinez --- .ci/Jenkinsfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.ci/Jenkinsfile b/.ci/Jenkinsfile index 771d61765..f0af86779 100644 --- a/.ci/Jenkinsfile +++ b/.ci/Jenkinsfile @@ -22,7 +22,7 @@ pipeline { quietPeriod(10) } triggers { - issueCommentTrigger('(?i)(.*(?:jenkins\\W+)?run\\W+(?:the\\W+)?tests(?:\\W+please)?.*|^\\/test$)') + issueCommentTrigger("${obltGitHubComments()}") } stages { /** @@ -103,4 +103,4 @@ def cleanup(){ deleteDir() } unstash 'source' -} \ No newline at end of file +} From 51833fbd216a4783cedd728c69e1d11ff0271811 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 23 Sep 2021 01:19:47 -0400 Subject: [PATCH 193/240] [Automation] Update elastic stack version to 7.16.0-51a7a70c for testing (#732) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 2c0c658df..73cdbaf22 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-e2daa6e3-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-51a7a70c-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 3d9efb0b0fc1746d0059d7094c1ae5dca19d0114 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 23 Sep 2021 12:25:11 +0000 Subject: [PATCH 194/240] Do not require that the default output use the default key in the outputs of policy (#713) (#734) * Do not require default key for output name. * Fix some issues. * Fix test. * Fix integration tests. (cherry picked from commit d4972782a36966fc7d6cd81a823c3cae3b620789) Co-authored-by: Blake Rouse --- cmd/fleet/handleCheckin.go | 19 +- .../pkg/policy/monitor_integration_test.go | 4 +- internal/pkg/policy/monitor_test.go | 10 +- internal/pkg/policy/output_permissions.go | 99 -------- .../pkg/policy/output_permissions_test.go | 218 ------------------ internal/pkg/policy/parsed_policy.go | 89 ++++++- internal/pkg/policy/parsed_policy_test.go | 41 +++- 7 files changed, 133 insertions(+), 347 deletions(-) delete mode 100644 internal/pkg/policy/output_permissions.go delete mode 100644 internal/pkg/policy/output_permissions_test.go diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 5a5e8703a..af8c6a7eb 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -421,9 +421,8 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a Logger() // The parsed policy object contains a map of name->role with a precalculated sha2. 
- defaultRole, ok := pp.Roles[policy.DefaultOutputName] - if !ok { - zlog.Error().Str("name", policy.DefaultOutputName).Msg("policy does not contain required output permission section") + if pp.Default.Role == nil { + zlog.Error().Str("name", pp.Default.Name).Msg("policy does not contain required output permission section") return nil, ErrNoOutputPerms } @@ -441,7 +440,7 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a switch { case agent.DefaultApiKey == "": zlog.Debug().Msg("must generate api key as default API key is not present") - case defaultRole.Sha2 != agent.PolicyOutputPermissionsHash: + case pp.Default.Role.Sha2 != agent.PolicyOutputPermissionsHash: zlog.Debug().Msg("must generate api key as policy output permissions changed") default: needKey = false @@ -450,26 +449,26 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a if needKey { zlog.Debug(). - RawJSON("roles", defaultRole.Raw). + RawJSON("roles", pp.Default.Role.Raw). Str("oldHash", agent.PolicyOutputPermissionsHash). - Str("newHash", defaultRole.Sha2). + Str("newHash", pp.Default.Role.Sha2). Msg("Generating a new API key") - defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker, agent.Id, policy.DefaultOutputName, defaultRole.Raw) + defaultOutputApiKey, err := generateOutputApiKey(ctx, bulker, agent.Id, pp.Default.Name, pp.Default.Role.Raw) if err != nil { zlog.Error().Err(err).Msg("fail generate output key") return nil, err } zlog.Info(). - Str("hash.sha256", defaultRole.Sha2). + Str("hash.sha256", pp.Default.Role.Sha2). Str("apiKeyId", defaultOutputApiKey.Id). Msg("Updating agent record to pick up default output key.") fields := map[string]interface{}{ dl.FieldDefaultApiKey: defaultOutputApiKey.Agent(), dl.FieldDefaultApiKeyId: defaultOutputApiKey.Id, - dl.FieldPolicyOutputPermissionsHash: defaultRole.Sha2, + dl.FieldPolicyOutputPermissionsHash: pp.Default.Role.Sha2, } body, err := json.Marshal(map[string]interface{}{ @@ -520,7 +519,7 @@ func rewritePolicy(pp *policy.ParsedPolicy, apiKey string) (interface{}, error) return nil, ErrNoPolicyOutput } - if ok := setMapObj(outputs, apiKey, "default", "api_key"); !ok { + if ok := setMapObj(outputs, apiKey, pp.Default.Name, "api_key"); !ok { return nil, ErrFailInjectApiKey } diff --git a/internal/pkg/policy/monitor_integration_test.go b/internal/pkg/policy/monitor_integration_test.go index 6bbb83b7f..6b70b8621 100644 --- a/internal/pkg/policy/monitor_integration_test.go +++ b/internal/pkg/policy/monitor_integration_test.go @@ -22,6 +22,8 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) +var policyBytes = []byte(`{"outputs":{"default":{"type":"elasticsearch"}}}`) + func setupIndex(ctx context.Context, t *testing.T) (string, bulk.Bulk) { index, bulker := ftesting.SetupIndexWithBulk(ctx, t, es.MappingPolicy) return index, bulker @@ -75,7 +77,7 @@ func TestMonitor_Integration(t *testing.T) { policy := model.Policy{ PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } go func() { diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index 5be221c1c..3c63feffd 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -25,6 +25,8 @@ import ( ftesting "github.com/elastic/fleet-server/v7/internal/pkg/testing" ) +var policyBytes = []byte(`{"outputs":{"default":{"type":"elasticsearch"}}}`) + func TestMonitor_NewPolicy(t *testing.T) { ctx, cancel := 
context.WithCancel(context.Background()) defer cancel() @@ -66,7 +68,7 @@ func TestMonitor_NewPolicy(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } policyData, err := json.Marshal(&policy) @@ -144,7 +146,7 @@ func TestMonitor_SamePolicy(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 1, } policyData, err := json.Marshal(&policy) @@ -218,7 +220,7 @@ func TestMonitor_NewPolicyUncoordinated(t *testing.T) { }, PolicyId: policyId, CoordinatorIdx: 0, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 2, } policyData, err := json.Marshal(&policy) @@ -295,7 +297,7 @@ func runTestMonitor_NewPolicyExists(t *testing.T, delay time.Duration) { }, PolicyId: policyId, CoordinatorIdx: 1, - Data: []byte("{}"), + Data: policyBytes, RevisionIdx: 2, } diff --git a/internal/pkg/policy/output_permissions.go b/internal/pkg/policy/output_permissions.go deleted file mode 100644 index 799e73cce..000000000 --- a/internal/pkg/policy/output_permissions.go +++ /dev/null @@ -1,99 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -package policy - -import ( - "crypto/sha256" - "encoding/hex" - "encoding/json" - "errors" - - "github.com/elastic/fleet-server/v7/internal/pkg/smap" -) - -const ( - DefaultOutputName = "default" - FieldOutputPermissions = "output_permissions" -) - -var ( - ErrOutputPermissionsNotFound = errors.New("output_permissions not found") - ErrDefaultOutputNotFound = errors.New("default output not found") - ErrInvalidPermissionsFormat = errors.New("invalid permissions format") -) - -func GetRoleDescriptors(outputPermissionsRaw []byte) (hash string, roles []byte, err error) { - if len(outputPermissionsRaw) == 0 { - return - } - - output, err := getDefaultOutputMap(outputPermissionsRaw) - if err != nil { - return - } - - // Calculating the hash of the original output map - hash, err = output.Hash() - if err != nil { - return - } - - roles, err = json.Marshal(output) - if err != nil { - return - } - - return -} - -func CheckOutputPermissionsChanged(hash string, outputPermissionsRaw []byte) (newHash string, roles []byte, changed bool, err error) { - if len(outputPermissionsRaw) == 0 { - return - } - - // shotcuircut, hash and compare as is, if equals the json is serialized consistently from jsacascript and go - newHash, err = getDefaultOutputHash(outputPermissionsRaw) - if err != nil { - return - } - if hash == newHash { - return hash, nil, false, nil - } - - newHash, roles, err = GetRoleDescriptors(outputPermissionsRaw) - if err != nil { - return - } - - return newHash, roles, (newHash != hash), nil -} - -func getDefaultOutputHash(outputPermissionsRaw []byte) (hash string, err error) { - var m map[string]json.RawMessage - err = json.Unmarshal(outputPermissionsRaw, &m) - if err != nil { - return - } - - if len(m[DefaultOutputName]) == 0 { - return - } - - b := sha256.Sum256(m[DefaultOutputName]) - return hex.EncodeToString(b[:]), nil -} - -func getDefaultOutputMap(outputPermissionsRaw []byte) (defaultOutput smap.Map, err error) { - outputPermissions, err := smap.Parse(outputPermissionsRaw) - if err != nil { - return - } - - defaultOutput = outputPermissions.GetMap(DefaultOutputName) - if defaultOutput == nil { - err = ErrDefaultOutputNotFound - } - return -} diff --git 
a/internal/pkg/policy/output_permissions_test.go b/internal/pkg/policy/output_permissions_test.go deleted file mode 100644 index 472d9ee74..000000000 --- a/internal/pkg/policy/output_permissions_test.go +++ /dev/null @@ -1,218 +0,0 @@ -// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one -// or more contributor license agreements. Licensed under the Elastic License; -// you may not use this file except in compliance with the Elastic License. - -// +build !integration - -package policy - -import ( - "testing" - - "github.com/elastic/fleet-server/v7/internal/pkg/smap" - "github.com/google/go-cmp/cmp" -) - -const ( - fallbackPermissions = ` - { - "default": { - "_fallback": { - "indices": [ - { - "names": [ - "logs-*", - "metrics-*", - "traces-*", - ".logs-endpoint.diagnostic.collection-*" - ], - "privileges": [ - "auto_configure", - "create_doc" - ] - } - ] - } - } - } -` - fallbackPermissionsHash = "48e2e1dfe0e64df0dd841e96e28bb82ff6273432e9ebccca259a3278ff86ee4c" - - outputPermissions = ` - { - "default": { - "nginx-logs-1": { - "indices": [ - { - "names": [ - "logs-nginx.access-*", - "logs-nginx.error-*" - ], - "privileges": [ - "append" - ] - } - ] - }, - "nginx-metrics-1": { - "indices": [ - { - "names": [ - "metrics-nginx.substatus-*" - ], - "privileges": [ - "append" - ] - } - ] - }, - "endpoint-policy1-part1": { - "indices": [ - { - "names": [ - ".logs-endpoint.diagnostic.collection-*" - ], - "privileges": [ - "read" - ] - } - ] - }, - "endpoint-policy1-part2": { - "indices": [ - { - "names": [ - "metrics-endpoint-*" - ], - "privileges": [ - "append" - ] - } - ] - } - } - } -` - outputPermissionsHash = "42c955b5df44eec374dc66a97ab8c2045a88583af499aba81345c4221e473ead" - - resultDescriptors = ` -{ - "endpoint-policy1-part1": { - "indices": [ - { - "names": [ - ".logs-endpoint.diagnostic.collection-*" - ], - "privileges": [ - "read" - ] - } - ] - }, - "endpoint-policy1-part2": { - "indices": [ - { - "names": [ - "metrics-endpoint-*" - ], - "privileges": [ - "append" - ] - } - ] - }, - "nginx-logs-1": { - "indices": [ - { - "names": [ - "logs-nginx.access-*", - "logs-nginx.error-*" - ], - "privileges": [ - "append" - ] - } - ] - }, - "nginx-metrics-1": { - "indices": [ - { - "names": [ - "metrics-nginx.substatus-*" - ], - "privileges": [ - "append" - ] - } - ] - } -} -` -) - -func TestGetRoleDescriptors(t *testing.T) { - - hash, roles, err := GetRoleDescriptors([]byte(outputPermissions)) - if err != nil { - t.Fatal(err) - } - - m, err := smap.Parse([]byte(resultDescriptors)) - if err != nil { - t.Fatal(err) - } - expected, err := m.Marshal() - if err != nil { - t.Fatal(err) - } - - diff := cmp.Diff(expected, roles) - if diff != "" { - t.Fatal(diff) - } - - diff = cmp.Diff(outputPermissionsHash, hash) - if diff != "" { - t.Fatal(diff) - } -} - -func TestCheckOutputPermissionsChanged(t *testing.T) { - // Detect change with initially empty hash - hash, roles, changed, err := CheckOutputPermissionsChanged("", []byte(fallbackPermissions)) - if err != nil { - t.Fatal(err) - } - diff := cmp.Diff(fallbackPermissionsHash, hash) - if diff != "" { - t.Error(diff) - } - - if !changed { - t.Error("expected policy hash change detected") - } - - if len(roles) == 0 { - t.Error("expected non empty roles descriptors") - } - - // Detect no change with the same hash and the content - newHash, roles, changed, err := CheckOutputPermissionsChanged(hash, []byte(fallbackPermissions)) - diff = cmp.Diff(hash, newHash) - if diff != "" { - t.Error(diff) - } - if changed { - 
t.Error("expected policy hash no change detected") - } - - // Detect the change with the new output permissions - newHash, roles, changed, err = CheckOutputPermissionsChanged(hash, []byte(outputPermissions)) - diff = cmp.Diff(outputPermissionsHash, newHash) - if diff != "" { - t.Error(diff) - } - if !changed { - t.Error("expected policy hash change detected") - } -} diff --git a/internal/pkg/policy/parsed_policy.go b/internal/pkg/policy/parsed_policy.go index 96429124c..75160eac5 100644 --- a/internal/pkg/policy/parsed_policy.go +++ b/internal/pkg/policy/parsed_policy.go @@ -6,11 +6,29 @@ package policy import ( "encoding/json" + "errors" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/smap" ) +const ( + FieldOutputs = "outputs" + FieldOutputType = "type" + FieldOutputFleetServer = "fleet_server" + FieldOutputServiceToken = "service_token" + FieldOutputPermissions = "output_permissions" + + OutputTypeElasticsearch = "elasticsearch" +) + +var ( + ErrOutputsNotFound = errors.New("outputs not found") + ErrDefaultOutputNotFound = errors.New("default output not found") + ErrMultipleDefaultOutputsFound = errors.New("multiple default outputs found") + ErrInvalidPermissionsFormat = errors.New("invalid permissions format") +) + type RoleT struct { Raw []byte Sha2 string @@ -18,10 +36,16 @@ type RoleT struct { type RoleMapT map[string]RoleT +type ParsedPolicyDefaults struct { + Name string + Role *RoleT +} + type ParsedPolicy struct { - Policy model.Policy - Fields map[string]json.RawMessage - Roles RoleMapT + Policy model.Policy + Fields map[string]json.RawMessage + Roles RoleMapT + Default ParsedPolicyDefaults } func NewParsedPolicy(p model.Policy) (*ParsedPolicy, error) { @@ -34,17 +58,35 @@ func NewParsedPolicy(p model.Policy) (*ParsedPolicy, error) { // Interpret the output permissions if available var roles map[string]RoleT - if perms := fields[FieldOutputPermissions]; len(perms) != 0 { + if perms, _ := fields[FieldOutputPermissions]; len(perms) != 0 { if roles, err = parsePerms(perms); err != nil { return nil, err } } + // Find the default role. 
+ outputs, ok := fields[FieldOutputs] + if !ok { + return nil, ErrOutputsNotFound + } + defaultName, err := findDefaultOutputName(outputs) + if err != nil { + return nil, err + } + var roleP *RoleT + if role, ok := roles[defaultName]; ok { + roleP = &role + } + // We are cool and the gang pp := &ParsedPolicy{ Policy: p, Fields: fields, Roles: roles, + Default: ParsedPolicyDefaults{ + Name: defaultName, + Role: roleP, + }, } return pp, nil @@ -80,3 +122,42 @@ func parsePerms(permsRaw json.RawMessage) (RoleMapT, error) { return m, nil } + +func findDefaultOutputName(outputsRaw json.RawMessage) (string, error) { + outputsMap, err := smap.Parse(outputsRaw) + if err != nil { + return "", err + } + + // iterate across the keys finding the defaults + var defaults []string + for k := range outputsMap { + + v := outputsMap.GetMap(k) + + if v != nil { + outputType := v.GetString(FieldOutputType) + if outputType != OutputTypeElasticsearch { + continue + } + fleetServer := v.GetMap(FieldOutputFleetServer) + if fleetServer == nil { + defaults = append(defaults, k) + continue + } + serviceToken := fleetServer.GetString(FieldOutputServiceToken) + if serviceToken == "" { + defaults = append(defaults, k) + continue + } + } + } + + if len(defaults) == 0 { + return "", ErrDefaultOutputNotFound + } + if len(defaults) == 1 { + return defaults[0], nil + } + return "", ErrMultipleDefaultOutputsFound +} diff --git a/internal/pkg/policy/parsed_policy_test.go b/internal/pkg/policy/parsed_policy_test.go index f15f118e5..116f8948e 100644 --- a/internal/pkg/policy/parsed_policy_test.go +++ b/internal/pkg/policy/parsed_policy_test.go @@ -16,15 +16,31 @@ const testPolicy = ` "id": "63f4e6d0-9626-11eb-b486-6de1529a4151", "revision": 33, "outputs": { - "default": { + "other": { "type": "elasticsearch", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ], + "fleet_server": {} + }, + "remote_not_es": { + "type": "logstash", "hosts": [ "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" ] + }, + "remote_with_token": { + "type": "elasticsearch", + "hosts": [ + "https://5a8bb94bfbe0401a909e1496a9e884c2.us-central1.gcp.foundit.no:443" + ], + "fleet_server": { + "service_token": "abc123" + } } }, "output_permissions": { - "default": { + "other": { "_fallback": { "cluster": [ "monitor" @@ -49,7 +65,7 @@ const testPolicy = ` "agent": { "monitoring": { "enabled": true, - "use_output": "default", + "use_output": "other", "logs": true, "metrics": true } @@ -60,7 +76,7 @@ const testPolicy = ` "name": "system-1", "revision": 2, "type": "logfile", - "use_output": "default", + "use_output": "other", "meta": { "package": { "name": "system", @@ -140,7 +156,7 @@ const testPolicy = ` "name": "system-1", "revision": 2, "type": "system/metrics", - "use_output": "default", + "use_output": "other", "meta": { "package": { "name": "system", @@ -308,7 +324,7 @@ const testPolicy = ` "name": "Endgame", "revision": 28, "type": "endpoint", - "use_output": "default", + "use_output": "other", "meta": { "package": { "name": "endpoint", @@ -494,14 +510,17 @@ func TestNewParsedPolicy(t *testing.T) { t.Error("Only expected one role") } - r, ok := pp.Roles["default"] - if !ok { - t.Fatal("Missing default role") + // Validate that default was found + if pp.Default.Name != "other" { + t.Error("other output should be identified as default") + } + if pp.Default.Role == nil { + t.Error("other output role should be identified") } expectedSha2 := 
"d4d0840fe28ca4900129a749b56cee729562c0a88c935192c659252b5b0d762a" - if r.Sha2 != expectedSha2 { - t.Fatal(fmt.Sprintf("Expected sha2: '%s', got '%s'.", expectedSha2, r.Sha2)) + if pp.Default.Role.Sha2 != expectedSha2 { + t.Fatal(fmt.Sprintf("Expected sha2: '%s', got '%s'.", expectedSha2, pp.Default.Role.Sha2)) } } } From 805b9ddc1813eb8219be2bcfe3dc7137762de3af Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 24 Sep 2021 01:19:22 -0400 Subject: [PATCH 195/240] [Automation] Update elastic stack version to 7.16.0-70330f11 for testing (#736) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 73cdbaf22..47a865607 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-51a7a70c-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-70330f11-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 044e8ef302242474efa00d6c347345c5f6d18b6b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 27 Sep 2021 01:19:16 -0400 Subject: [PATCH 196/240] [Automation] Update elastic stack version to 7.16.0-4cbd636a for testing (#740) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 47a865607..73baf0ae4 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-70330f11-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-4cbd636a-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 930136b709144cb1bede3ba71d4590a43cdee6de Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 28 Sep 2021 01:19:25 -0400 Subject: [PATCH 197/240] [Automation] Update elastic stack version to 7.16.0-25009c74 for testing (#744) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 73baf0ae4..4f8ed20ad 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-4cbd636a-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-25009c74-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 9d869fdb13f360240b547cd7cacc05d243652ec1 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 30 Sep 2021 01:19:31 -0400 Subject: [PATCH 198/240] [Automation] Update elastic stack version to 7.16.0-f67263fd for testing (#752) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 4f8ed20ad..6c86fb164 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-25009c74-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-f67263fd-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 
06fe47b2a3cff87c84dda687f7a39071c8977819 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 1 Oct 2021 01:19:08 -0400 Subject: [PATCH 199/240] [Automation] Update elastic stack version to 7.16.0-806be5a9 for testing (#754) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 6c86fb164..de9dd0792 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-f67263fd-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-806be5a9-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 165533e3765d4487b8981849ff1cc2b2ec956a46 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 4 Oct 2021 01:18:18 -0400 Subject: [PATCH 200/240] [Automation] Update elastic stack version to 7.16.0-76e5f71e for testing (#757) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index de9dd0792..1b11ac696 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-806be5a9-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-76e5f71e-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From e3bcb9f221712435bcd8fc7522a4d230615ffb87 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 11 Oct 2021 18:13:36 +0000 Subject: [PATCH 201/240] Bump golang to 1.17.2. 
(#769) (#772) (cherry picked from commit a4659ad8f653ac2eb0871400d5f06cd6b2f1728c) Co-authored-by: Blake Rouse --- .go-version | 2 +- cmd/fleet/main_integration_test.go | 1 + cmd/fleet/server_integration_test.go | 1 + cmd/fleet/server_test.go | 1 + internal/pkg/apikey/apikey_integration_test.go | 1 + internal/pkg/apikey/apikey_test.go | 1 + internal/pkg/bulk/bulk_integration_test.go | 1 + internal/pkg/bulk/opMulti_integration_test.go | 1 + internal/pkg/cache/impl_integration.go | 1 + internal/pkg/cache/impl_ristretto.go | 1 + internal/pkg/config/config_test.go | 1 + internal/pkg/config/input_test.go | 1 + internal/pkg/config/output_test.go | 1 + internal/pkg/coordinator/monitor_integration_test.go | 1 + internal/pkg/coordinator/v0_test.go | 1 + internal/pkg/dl/action_results_integration_test.go | 1 + internal/pkg/dl/actions_integration_test.go | 1 + internal/pkg/dl/agent_integration_test.go | 1 + internal/pkg/dl/enrollment_api_key_integration_test.go | 1 + internal/pkg/dl/policies_integration_test.go | 1 + internal/pkg/dl/policies_leader_integration_test.go | 1 + internal/pkg/dl/servers_integration_test.go | 1 + internal/pkg/es/holes_test.go | 1 + internal/pkg/es/result_test.go | 1 + internal/pkg/model/ext_test.go | 1 + internal/pkg/monitor/monitor_integration_test.go | 1 + internal/pkg/monitor/subscription_monitor_integration_test.go | 1 + internal/pkg/policy/monitor_integration_test.go | 1 + internal/pkg/policy/monitor_test.go | 1 + internal/pkg/policy/self_test.go | 1 + internal/pkg/policy/sub_test.go | 1 + internal/pkg/testing/actions.go | 1 + internal/pkg/testing/setup.go | 1 + 33 files changed, 33 insertions(+), 1 deletion(-) diff --git a/.go-version b/.go-version index 0d92a1028..06fb41b63 100644 --- a/.go-version +++ b/.go-version @@ -1 +1 @@ -1.16.5 +1.17.2 diff --git a/cmd/fleet/main_integration_test.go b/cmd/fleet/main_integration_test.go index e3d7a4bbd..964d36016 100644 --- a/cmd/fleet/main_integration_test.go +++ b/cmd/fleet/main_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package fleet diff --git a/cmd/fleet/server_integration_test.go b/cmd/fleet/server_integration_test.go index c0d846fb4..f8abce330 100644 --- a/cmd/fleet/server_integration_test.go +++ b/cmd/fleet/server_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package fleet diff --git a/cmd/fleet/server_test.go b/cmd/fleet/server_test.go index 846b078ce..797c95fdf 100644 --- a/cmd/fleet/server_test.go +++ b/cmd/fleet/server_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package fleet diff --git a/internal/pkg/apikey/apikey_integration_test.go b/internal/pkg/apikey/apikey_integration_test.go index f5dbb6a1f..0bb6da14e 100644 --- a/internal/pkg/apikey/apikey_integration_test.go +++ b/internal/pkg/apikey/apikey_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
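A note on the pattern repeated throughout this patch: Go 1.17 introduces the //go:build constraint syntax, and gofmt now places it above the legacy // +build line while keeping the two in agreement, which is why every tagged file here gains exactly one line. A minimal illustration (the package name is arbitrary):

// A file meant to compile only for integration tests carries both tag
// forms; gofmt keeps them in sync from Go 1.17 onward.

//go:build integration
// +build integration

package example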
+//go:build integration // +build integration package apikey diff --git a/internal/pkg/apikey/apikey_test.go b/internal/pkg/apikey/apikey_test.go index 12ca70613..d9a2dba1b 100644 --- a/internal/pkg/apikey/apikey_test.go +++ b/internal/pkg/apikey/apikey_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package apikey diff --git a/internal/pkg/bulk/bulk_integration_test.go b/internal/pkg/bulk/bulk_integration_test.go index 5c289da7d..353da82f7 100644 --- a/internal/pkg/bulk/bulk_integration_test.go +++ b/internal/pkg/bulk/bulk_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package bulk diff --git a/internal/pkg/bulk/opMulti_integration_test.go b/internal/pkg/bulk/opMulti_integration_test.go index 16df21bcd..45d13fefa 100644 --- a/internal/pkg/bulk/opMulti_integration_test.go +++ b/internal/pkg/bulk/opMulti_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package bulk diff --git a/internal/pkg/cache/impl_integration.go b/internal/pkg/cache/impl_integration.go index e7e87b3d5..013b4a6f5 100644 --- a/internal/pkg/cache/impl_integration.go +++ b/internal/pkg/cache/impl_integration.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package cache diff --git a/internal/pkg/cache/impl_ristretto.go b/internal/pkg/cache/impl_ristretto.go index 877e3023a..582ba23e7 100644 --- a/internal/pkg/cache/impl_ristretto.go +++ b/internal/pkg/cache/impl_ristretto.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package cache diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index 25862e25a..bfffcd9da 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package config diff --git a/internal/pkg/config/input_test.go b/internal/pkg/config/input_test.go index 10a8ec520..0379aac19 100644 --- a/internal/pkg/config/input_test.go +++ b/internal/pkg/config/input_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package config diff --git a/internal/pkg/config/output_test.go b/internal/pkg/config/output_test.go index 013edc01a..6d38a67e8 100644 --- a/internal/pkg/config/output_test.go +++ b/internal/pkg/config/output_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. 
Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package config diff --git a/internal/pkg/coordinator/monitor_integration_test.go b/internal/pkg/coordinator/monitor_integration_test.go index 782e83047..6e0d0a853 100644 --- a/internal/pkg/coordinator/monitor_integration_test.go +++ b/internal/pkg/coordinator/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package coordinator diff --git a/internal/pkg/coordinator/v0_test.go b/internal/pkg/coordinator/v0_test.go index 12ebc6966..41e340e63 100644 --- a/internal/pkg/coordinator/v0_test.go +++ b/internal/pkg/coordinator/v0_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package coordinator diff --git a/internal/pkg/dl/action_results_integration_test.go b/internal/pkg/dl/action_results_integration_test.go index c2f0ccf48..938736cbc 100644 --- a/internal/pkg/dl/action_results_integration_test.go +++ b/internal/pkg/dl/action_results_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/actions_integration_test.go b/internal/pkg/dl/actions_integration_test.go index 7f9e26e74..6883c7875 100644 --- a/internal/pkg/dl/actions_integration_test.go +++ b/internal/pkg/dl/actions_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/agent_integration_test.go b/internal/pkg/dl/agent_integration_test.go index dff60492f..df9d637df 100644 --- a/internal/pkg/dl/agent_integration_test.go +++ b/internal/pkg/dl/agent_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/enrollment_api_key_integration_test.go b/internal/pkg/dl/enrollment_api_key_integration_test.go index d2322ba4b..9c2194ea7 100644 --- a/internal/pkg/dl/enrollment_api_key_integration_test.go +++ b/internal/pkg/dl/enrollment_api_key_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/policies_integration_test.go b/internal/pkg/dl/policies_integration_test.go index 742feb629..66027c794 100644 --- a/internal/pkg/dl/policies_integration_test.go +++ b/internal/pkg/dl/policies_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/policies_leader_integration_test.go b/internal/pkg/dl/policies_leader_integration_test.go index 3c026162e..e6308c725 100644 --- a/internal/pkg/dl/policies_leader_integration_test.go +++ b/internal/pkg/dl/policies_leader_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/dl/servers_integration_test.go b/internal/pkg/dl/servers_integration_test.go index 911e957c2..aba768efe 100644 --- a/internal/pkg/dl/servers_integration_test.go +++ b/internal/pkg/dl/servers_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package dl diff --git a/internal/pkg/es/holes_test.go b/internal/pkg/es/holes_test.go index 79fde9843..ad261cae7 100644 --- a/internal/pkg/es/holes_test.go +++ b/internal/pkg/es/holes_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package es diff --git a/internal/pkg/es/result_test.go b/internal/pkg/es/result_test.go index 8484613fc..83808f2ad 100644 --- a/internal/pkg/es/result_test.go +++ b/internal/pkg/es/result_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package es diff --git a/internal/pkg/model/ext_test.go b/internal/pkg/model/ext_test.go index efea34521..e48194b30 100644 --- a/internal/pkg/model/ext_test.go +++ b/internal/pkg/model/ext_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package model diff --git a/internal/pkg/monitor/monitor_integration_test.go b/internal/pkg/monitor/monitor_integration_test.go index 82cdd87cc..c9704f409 100644 --- a/internal/pkg/monitor/monitor_integration_test.go +++ b/internal/pkg/monitor/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package monitor diff --git a/internal/pkg/monitor/subscription_monitor_integration_test.go b/internal/pkg/monitor/subscription_monitor_integration_test.go index 22d226a33..e3371764d 100644 --- a/internal/pkg/monitor/subscription_monitor_integration_test.go +++ b/internal/pkg/monitor/subscription_monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. 
+//go:build integration // +build integration package monitor diff --git a/internal/pkg/policy/monitor_integration_test.go b/internal/pkg/policy/monitor_integration_test.go index 6b70b8621..94148c50a 100644 --- a/internal/pkg/policy/monitor_integration_test.go +++ b/internal/pkg/policy/monitor_integration_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package policy diff --git a/internal/pkg/policy/monitor_test.go b/internal/pkg/policy/monitor_test.go index 3c63feffd..eac8e4419 100644 --- a/internal/pkg/policy/monitor_test.go +++ b/internal/pkg/policy/monitor_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package policy diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go index 92b07e09f..ce2bec87d 100644 --- a/internal/pkg/policy/self_test.go +++ b/internal/pkg/policy/self_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package policy diff --git a/internal/pkg/policy/sub_test.go b/internal/pkg/policy/sub_test.go index 18aa85f2f..770225454 100644 --- a/internal/pkg/policy/sub_test.go +++ b/internal/pkg/policy/sub_test.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build !integration // +build !integration package policy diff --git a/internal/pkg/testing/actions.go b/internal/pkg/testing/actions.go index 63e66dd87..95815b444 100644 --- a/internal/pkg/testing/actions.go +++ b/internal/pkg/testing/actions.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package testing diff --git a/internal/pkg/testing/setup.go b/internal/pkg/testing/setup.go index 249aed8e7..dc692262f 100644 --- a/internal/pkg/testing/setup.go +++ b/internal/pkg/testing/setup.go @@ -2,6 +2,7 @@ // or more contributor license agreements. Licensed under the Elastic License; // you may not use this file except in compliance with the Elastic License. +//go:build integration // +build integration package testing From 30ff1e1e8a9a458b26c6723ac860e5820147ac29 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 11 Oct 2021 18:18:43 +0000 Subject: [PATCH 202/240] Fix self monitor to report starting and wait for fleet-server input to be added to policy. 
(#768) (#773) (cherry picked from commit 334cd2d18c7c1204dcaf68dca491c874604ed158) Co-authored-by: Blake Rouse Co-authored-by: mergify[bot] <37929162+mergify[bot]@users.noreply.github.com> --- internal/pkg/policy/self.go | 9 ++- internal/pkg/policy/self_test.go | 112 +++++++++++++++++++++++++++---- 2 files changed, 108 insertions(+), 13 deletions(-) diff --git a/internal/pkg/policy/self.go b/internal/pkg/policy/self.go index 802676133..f1b87734e 100644 --- a/internal/pkg/policy/self.go +++ b/internal/pkg/policy/self.go @@ -218,7 +218,14 @@ func (m *selfMonitorT) updateStatus(ctx context.Context) (proto.StateObserved_St return proto.StateObserved_FAILED, err } if !data.HasType("fleet-server") { - return proto.StateObserved_FAILED, errors.New("assigned policy does not have fleet-server input") + // no fleet-server input + m.status = proto.StateObserved_STARTING + if m.policyId == "" { + m.reporter.Status(proto.StateObserved_STARTING, "Waiting on fleet-server input to be added to default policy", nil) + } else { + m.reporter.Status(proto.StateObserved_STARTING, fmt.Sprintf("Waiting on fleet-server input to be added to policy: %s", m.policyId), nil) + } + return proto.StateObserved_STARTING, nil } status := proto.StateObserved_HEALTHY diff --git a/internal/pkg/policy/self_test.go b/internal/pkg/policy/self_test.go index ce2bec87d..38dcc7b4a 100644 --- a/internal/pkg/policy/self_test.go +++ b/internal/pkg/policy/self_test.go @@ -72,7 +72,51 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { policyId := uuid.Must(uuid.NewV4()).String() rId := xid.New().String() - policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{}}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 1, + DefaultFleetServer: true, + } + pData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: pData, + }, + }) + }() + + // should still be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != "Waiting on fleet-server input to be added to default policy" { + return fmt.Errorf("should be matching with default policy") + } + return nil + }) + + rId = xid.New().String() + policyContents, err = json.Marshal(&policyData{Inputs: []policyInput{ { Type: "fleet-server", }, @@ -80,7 +124,7 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { if err != nil { t.Fatal(err) } - policy := model.Policy{ + policy = model.Policy{ ESDocument: model.ESDocument{ Id: rId, Version: 1, @@ -89,10 +133,10 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { PolicyId: policyId, CoordinatorIdx: 1, Data: policyContents, - RevisionIdx: 1, + RevisionIdx: 2, DefaultFleetServer: true, } - policyData, err := json.Marshal(&policy) + pData, err = json.Marshal(&policy) if err != nil { t.Fatal(err) } @@ -100,9 +144,9 @@ func TestSelfMonitor_DefaultPolicy(t *testing.T) { mm.Notify(ctx, []es.HitT{ { Id: rId, - SeqNo: 1, + SeqNo: 2, Version: 1, - Source: policyData, + Source: pData, }, }) }() @@ -338,7 +382,51 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { }, ftesting.RetrySleep(1*time.Second)) rId := 
xid.New().String() - policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{ + policyContents, err := json.Marshal(&policyData{Inputs: []policyInput{}}) + if err != nil { + t.Fatal(err) + } + policy := model.Policy{ + ESDocument: model.ESDocument{ + Id: rId, + Version: 1, + SeqNo: 1, + }, + PolicyId: policyId, + CoordinatorIdx: 1, + Data: policyContents, + RevisionIdx: 2, + DefaultFleetServer: true, + } + pData, err := json.Marshal(&policy) + if err != nil { + t.Fatal(err) + } + go func() { + mm.Notify(ctx, []es.HitT{ + { + Id: rId, + SeqNo: 1, + Version: 1, + Source: pData, + }, + }) + }() + + // should still be set to starting + ftesting.Retry(t, ctx, func(ctx context.Context) error { + status, msg, _ := reporter.Current() + if status != proto.StateObserved_STARTING { + return fmt.Errorf("should be reported as starting; instead its %s", status) + } + if msg != fmt.Sprintf("Waiting on fleet-server input to be added to policy: %s", policyId) { + return fmt.Errorf("should be matching with specific policy") + } + return nil + }, ftesting.RetrySleep(1*time.Second)) + + rId = xid.New().String() + policyContents, err = json.Marshal(&policyData{Inputs: []policyInput{ { Type: "fleet-server", }, @@ -346,11 +434,11 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { if err != nil { t.Fatal(err) } - policy := model.Policy{ + policy = model.Policy{ ESDocument: model.ESDocument{ Id: rId, Version: 1, - SeqNo: 1, + SeqNo: 2, }, PolicyId: policyId, CoordinatorIdx: 1, @@ -358,7 +446,7 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { RevisionIdx: 1, DefaultFleetServer: true, } - policyData, err := json.Marshal(&policy) + pData, err = json.Marshal(&policy) if err != nil { t.Fatal(err) } @@ -366,9 +454,9 @@ func TestSelfMonitor_SpecificPolicy(t *testing.T) { mm.Notify(ctx, []es.HitT{ { Id: rId, - SeqNo: 1, + SeqNo: 2, Version: 1, - Source: policyData, + Source: pData, }, }) }() From fef2d018bbdf98eca3765fc4319de137c6fddd68 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 13 Oct 2021 01:19:35 -0400 Subject: [PATCH 203/240] [Automation] Update elastic stack version to 7.16.0-86000c2d for testing (#778) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 1b11ac696..3da1d105f 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-76e5f71e-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-86000c2d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 4e79e98c3bc45de56f0b03ff1d6bbc47fc28774e Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 14 Oct 2021 01:28:45 -0400 Subject: [PATCH 204/240] [Automation] Update elastic stack version to 7.16.0-a907c0d5 for testing (#781) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3da1d105f..67e15251e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-86000c2d-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-a907c0d5-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 
119244396268efb4e4b4b3a85142d7c07065a438 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 18 Oct 2021 01:19:25 -0400 Subject: [PATCH 205/240] [Automation] Update elastic stack version to 7.16.0-8bf0b9b1 for testing (#786) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 67e15251e..9d6549311 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-a907c0d5-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-8bf0b9b1-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From db99c489e1f4d804a7186826c6d5c44df80b5842 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Mon, 18 Oct 2021 11:17:40 +0000 Subject: [PATCH 206/240] [7.x](backport #787) Limit const size on 32bit arch to fit int32 (#788) * limit int to maxInt32 on 32 bit architectures (cherry picked from commit aa3ede4788298ebe8454db47ade22bfb955411c5) # Conflicts: # internal/pkg/config/env_defaults.go * limit int to maxInt32 on 32 bit architectures (cherry picked from commit 4f50f962ea3d43c1f2c72c1b81991cca9e64f3d7) # Conflicts: # dev-tools/buildlimits/buildlimits.go * limit int to maxInt32 on 32 bit architectures, tests (cherry picked from commit 0a86974304684879225c2f3cc05f575e08c123cc) # Conflicts: # internal/pkg/config/env_defaults_test.go * limit int to maxInt32 on 32 bit architectures, tests (cherry picked from commit b72f0fd61d8339e3fd836f2198bfc8c1a2f2e763) * make compiler work (cherry picked from commit 661258021b328193b80af2294e4c21ed97cf6789) * Server and cache defaults based on env Server and cache defaults based on env * test fix Co-authored-by: Michal Pristas --- Makefile | 6 + NOTICE.txt | 39 +++ dev-tools/buildlimits/buildlimits.go | 264 +++++++++++++++++++ go.mod | 1 + go.sum | 2 + internal/pkg/config/cache.go | 18 +- internal/pkg/config/defaults/1024_limits.yml | 25 ++ internal/pkg/config/defaults/2048_limits.yml | 25 ++ internal/pkg/config/defaults/4096_limits.yml | 25 ++ internal/pkg/config/defaults/8192_limits.yml | 25 ++ internal/pkg/config/defaults/base_limits.yml | 24 ++ internal/pkg/config/defaults/max_limits.yml | 24 ++ internal/pkg/config/env_defaults.go | 180 +++++++++++++ internal/pkg/config/env_defaults_test.go | 36 +++ internal/pkg/config/limits.go | 35 +-- 15 files changed, 704 insertions(+), 25 deletions(-) create mode 100644 dev-tools/buildlimits/buildlimits.go create mode 100644 internal/pkg/config/defaults/1024_limits.yml create mode 100644 internal/pkg/config/defaults/2048_limits.yml create mode 100644 internal/pkg/config/defaults/4096_limits.yml create mode 100644 internal/pkg/config/defaults/8192_limits.yml create mode 100644 internal/pkg/config/defaults/base_limits.yml create mode 100644 internal/pkg/config/defaults/max_limits.yml create mode 100644 internal/pkg/config/env_defaults.go create mode 100644 internal/pkg/config/env_defaults_test.go diff --git a/Makefile b/Makefile index 9a51fa286..d5b45db87 100644 --- a/Makefile +++ b/Makefile @@ -53,6 +53,7 @@ generate: ## - Generate schema models .PHONY: check check: ## - Run all checks @$(MAKE) generate + @$(MAKE) defaults @$(MAKE) check-headers @$(MAKE) check-go @$(MAKE) notice @@ -83,6 +84,11 @@ notice: ## - Generates the NOTICE.txt file. 
-noticeOut NOTICE.txt \ -depsOut "" +.PHONY: defaults +defaults: ## -Generate defaults based on limits files. + @echo "Generating env_defaults.go" + @go run dev-tools/buildlimits/buildlimits.go --in "internal/pkg/config/defaults/*.yml" --out internal/pkg/config/env_defaults.go + .PHONY: check-no-changes check-no-changes: @git diff | cat diff --git a/NOTICE.txt b/NOTICE.txt index a1407b24c..a5ecbf2aa 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -2167,6 +2167,45 @@ SOFTWARE. +-------------------------------------------------------------------------------- +Dependency : github.com/pbnjay/memory +Version: v0.0.0-20210728143218-7b4eea64cf58 +Licence type (autodetected): BSD-3-Clause +-------------------------------------------------------------------------------- + +Contents of probable licence file $GOMODCACHE/github.com/pbnjay/memory@v0.0.0-20210728143218-7b4eea64cf58/LICENSE: + +BSD 3-Clause License + +Copyright (c) 2017, Jeremy Jay +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of the copyright holder nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + -------------------------------------------------------------------------------- Dependency : github.com/pkg/errors Version: v0.9.1 diff --git a/dev-tools/buildlimits/buildlimits.go b/dev-tools/buildlimits/buildlimits.go new file mode 100644 index 000000000..22f007bca --- /dev/null +++ b/dev-tools/buildlimits/buildlimits.go @@ -0,0 +1,264 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package main + +import ( + "bytes" + "flag" + "fmt" + "go/format" + "io/ioutil" + "os" + "text/template" + + "github.com/elastic/beats/v7/licenses" + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" +) + +var ( + input string + output string + license string +) + +func init() { + flag.StringVar(&input, "in", "", "Source of input. \"-\" means reading from stdin") + flag.StringVar(&output, "out", "-", "Output path. 
\"-\" means writing to stdout") + flag.StringVar(&license, "license", "Elastic", "License header for generated file.") +} + +var tmpl = template.Must(template.New("specs").Parse(` +{{ .License }} +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. + +package config + +import ( + "math" + "runtime" + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" + "github.com/elastic/go-ucfg/yaml" + "github.com/pbnjay/memory" + "github.com/pkg/errors" +) + +const ( + defaultCacheNumCounters = 500000 // 10x times expected count + defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size + + defaultMaxConnections = 0 // no limit + defaultPolicyThrottle = time.Millisecond * 5 + + defaultCheckinInterval = time.Millisecond + defaultCheckinBurst = 1000 + defaultCheckinMax = 0 + defaultCheckinMaxBody = 1024 * 1024 + + defaultArtifactInterval = time.Millisecond * 5 + defaultArtifactBurst = 25 + defaultArtifactMax = 50 + defaultArtifactMaxBody = 0 + + defaultEnrollInterval = time.Millisecond * 10 + defaultEnrollBurst = 100 + defaultEnrollMax = 50 + defaultEnrollMaxBody = 1024 * 512 + + defaultAckInterval = time.Millisecond * 10 + defaultAckBurst = 100 + defaultAckMax = 50 + defaultAckMaxBody = 1024 * 1024 * 2 +) + +type valueRange struct { + Min int ` + "`config:\"min\"`" + ` + Max int ` + "`config:\"max\"`" + ` +} + +type envLimits struct { + RAM valueRange ` + "`config:\"ram\"`" + ` + Server *serverLimitDefaults ` + "`config:\"server_limits\"`" + ` + Cache *cacheLimits ` + "`config:\"cache_limits\"`" + ` +} + +func defaultEnvLimits() *envLimits { + return &envLimits{ + RAM: valueRange{ + Min: 0, + Max: int(getMaxInt()), + }, + Server: defaultserverLimitDefaults(), + Cache: defaultCacheLimits(), + } +} + +type cacheLimits struct { + NumCounters int64 ` + "`config:\"num_counters\"`" + ` + MaxCost int64 ` + "`config:\"max_cost\"`" + ` +} + +func defaultCacheLimits() *cacheLimits { + return &cacheLimits{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + } +} + +type limit struct { + Interval time.Duration ` + "`config:\"interval\"`" + ` + Burst int ` + "`config:\"burst\"`" + ` + Max int64 ` + "`config:\"max\"`" + ` + MaxBody int64 ` + "`config:\"max_body_byte_size\"`" + ` +} + +type serverLimitDefaults struct { + PolicyThrottle time.Duration ` + "`config:\"policy_throttle\"`" + ` + MaxConnections int ` + "`config:\"max_connections\"`" + ` + + CheckinLimit limit ` + "`config:\"checkin_limit\"`" + ` + ArtifactLimit limit ` + "`config:\"artifact_limit\"`" + ` + EnrollLimit limit ` + "`config:\"enroll_limit\"`" + ` + AckLimit limit ` + "`config:\"ack_limit\"`" + ` +} + +func defaultserverLimitDefaults() *serverLimitDefaults { + return &serverLimitDefaults{ + PolicyThrottle: defaultCacheNumCounters, + MaxConnections: defaultCacheMaxCost, + + CheckinLimit: limit{ + Interval: defaultCheckinInterval, + Burst: defaultCheckinBurst, + Max: defaultCheckinMax, + MaxBody: defaultCheckinMaxBody, + }, + ArtifactLimit: limit{ + Interval: defaultArtifactInterval, + Burst: defaultArtifactBurst, + Max: defaultArtifactMax, + MaxBody: defaultArtifactMaxBody, + }, + EnrollLimit: limit{ + Interval: defaultEnrollInterval, + Burst: defaultEnrollBurst, + Max: defaultEnrollMax, + MaxBody: defaultEnrollMaxBody, + }, + AckLimit: limit{ + Interval: defaultAckInterval, + Burst: defaultAckBurst, + Max: defaultAckMax, + MaxBody: defaultAckMaxBody, + }, + } +} + +var defaults []*envLimits + +func init() { + // Packed Files + {{ range $i, $f := .Files 
-}} + // {{ $f }} + {{ end -}} + unpacked := packer.MustUnpack("{{ .Pack }}") + + for f, v := range unpacked { + cfg, err := yaml.NewConfig(v, DefaultOptions...) + if err != nil { + panic(errors.Wrap(err, "Cannot read spec from "+f)) + } + + l := defaultEnvLimits() + if err := cfg.Unpack(&l, DefaultOptions...); err != nil { + panic(errors.Wrap(err, "Cannot unpack spec from "+f)) + } + + defaults = append(defaults, l) + } +} + +func loadLimits() *envLimits { + ramSize := int(memory.TotalMemory() / 1024 / 1024) + return loadLimitsForRam(ramSize) +} + +func loadLimitsForRam(currentRAM int) *envLimits { + for _, l := range defaults { + // get max possible config for current env + if l.RAM.Min < currentRAM && currentRAM <= l.RAM.Max { + return l + } + } + + return defaultEnvLimits() +} + +func getMaxInt() int64 { + if strings.HasSuffix(runtime.GOARCH, "64") { + return math.MaxInt64 + } + return math.MaxInt32 +} + +`)) + +func main() { + flag.Parse() + + if len(input) == 0 { + fmt.Fprintln(os.Stderr, "Invalid input source") + os.Exit(1) + } + + l, err := licenses.Find(license) + if err != nil { + fmt.Fprintf(os.Stderr, "problem to retrieve the license, error: %+v", err) + os.Exit(1) + return + } + + data, err := gen(input, l) + if err != nil { + fmt.Fprintf(os.Stderr, "Error while generating the file, err: %+v\n", err) + os.Exit(1) + } + + if output == "-" { + os.Stdout.Write(data) + return + } else { + ioutil.WriteFile(output, data, 0640) + } + + return +} + +func gen(path string, l string) ([]byte, error) { + pack, files, err := packer.Pack(input) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + tmpl.Execute(&buf, struct { + Pack string + Files []string + License string + }{ + Pack: pack, + Files: files, + License: l, + }) + + formatted, err := format.Source(buf.Bytes()) + if err != nil { + return nil, err + } + + return formatted, nil +} diff --git a/go.mod b/go.mod index 5f2ebf3d9..8697744ef 100644 --- a/go.mod +++ b/go.mod @@ -17,6 +17,7 @@ require ( github.com/julienschmidt/httprouter v1.3.0 github.com/mailru/easyjson v0.7.7 github.com/miolini/datacounter v1.0.2 + github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 github.com/rs/xid v1.2.1 github.com/rs/zerolog v1.19.0 diff --git a/go.sum b/go.sum index 1b6ac7f52..583e177b2 100644 --- a/go.sum +++ b/go.sum @@ -621,6 +621,8 @@ github.com/otiai10/mint v1.3.1 h1:BCmzIS3n71sGfHB5NMNDB3lHYPz8fWSkCAErHed//qc= github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2 h1:CXwSGu/LYmbjEab5aMCs5usQRVBGThelUKBNnoSOuso= github.com/oxtoacart/bpool v0.0.0-20150712133111-4e1c5567d7c2/go.mod h1:L3UMQOThbttwfYRNFOWLLVXMhk5Lkio4GGOtw5UrxS0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 h1:onHthvaw9LFnH4t2DcNVpwGmV9E1BkGknEliJkfwQj0= +github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58/go.mod h1:DXv8WO4yhMYhSNPKjeNKa5WY9YCIEBRbNzFFPJbWO6Y= github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= github.com/peterbourgon/diskv v2.0.1+incompatible h1:UBdAOUP5p4RWqPBg048CAvpKN+vxiaj6gdUUzhl4XmI= diff --git a/internal/pkg/config/cache.go b/internal/pkg/config/cache.go index 72cb38939..c1d5a67cf 100644 --- a/internal/pkg/config/cache.go +++ b/internal/pkg/config/cache.go @@ -9,13 +9,11 @@ import ( ) const ( - defaultCacheNumCounters = 
500000 // 10x times expected count - defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size - defaultActionTTL = time.Minute * 5 - defaultEnrollKeyTTL = time.Minute - defaultArtifactTTL = time.Hour * 24 - defaultApiKeyTTL = time.Minute * 15 // ApiKey validation is a bottleneck. - defaultApiKeyJitter = time.Minute * 5 // Jitter allows some randomness on ApiKeyTTL, zero to disable + defaultActionTTL = time.Minute * 5 + defaultEnrollKeyTTL = time.Minute + defaultArtifactTTL = time.Hour * 24 + defaultApiKeyTTL = time.Minute * 15 // ApiKey validation is a bottleneck. + defaultApiKeyJitter = time.Minute * 5 // Jitter allows some randomness on ApiKeyTTL, zero to disable ) type Cache struct { @@ -29,8 +27,10 @@ type Cache struct { } func (c *Cache) InitDefaults() { - c.NumCounters = defaultCacheNumCounters - c.MaxCost = defaultCacheMaxCost + l := loadLimits().Cache + + c.NumCounters = l.NumCounters + c.MaxCost = l.MaxCost c.ActionTTL = defaultActionTTL c.EnrollKeyTTL = defaultEnrollKeyTTL c.ArtifactTTL = defaultArtifactTTL diff --git a/internal/pkg/config/defaults/1024_limits.yml b/internal/pkg/config/defaults/1024_limits.yml new file mode 100644 index 000000000..cb6f82390 --- /dev/null +++ b/internal/pkg/config/defaults/1024_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 1024 + max: 2048 +cache_limits: + num_counters: 20000 + max_cost: 20971520 +server_limits: + policy_throttle: 50ms + max_connections: 7000 + checkin_limit: + interval: 5ms + burst: 500 + max: 5001 + artifact_limit: + interval: 5ms + burst: 500 + max: 1000 + enroll_limit: + interval: 20ms + burst: 50 + max: 100 + ack_limit: + interval: 4ms + burst: 500 + max: 1000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/2048_limits.yml b/internal/pkg/config/defaults/2048_limits.yml new file mode 100644 index 000000000..5e5462cfc --- /dev/null +++ b/internal/pkg/config/defaults/2048_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 2048 + max: 4096 +cache_limits: + num_counters: 40000 + max_cost: 50971520 +server_limits: + policy_throttle: 10ms + max_connections: 10000 + checkin_limit: + interval: 2ms + burst: 1000 + max: 7501 + artifact_limit: + interval: 2ms + burst: 1000 + max: 2000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 2ms + burst: 1000 + max: 2000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/4096_limits.yml b/internal/pkg/config/defaults/4096_limits.yml new file mode 100644 index 000000000..622460411 --- /dev/null +++ b/internal/pkg/config/defaults/4096_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 4096 + max: 8192 +cache_limits: + num_counters: 80000 + max_cost: 104857600 +server_limits: + policy_throttle: 5ms + max_connections: 20000 + checkin_limit: + interval: 1ms + burst: 2000 + max: 10001 + artifact_limit: + interval: 1ms + burst: 2000 + max: 4000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 1ms + burst: 2000 + max: 4000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/8192_limits.yml b/internal/pkg/config/defaults/8192_limits.yml new file mode 100644 index 000000000..9dfcf3784 --- /dev/null +++ b/internal/pkg/config/defaults/8192_limits.yml @@ -0,0 +1,25 @@ +ram: + min: 8192 + max: 16384 +cache_limits: + num_counters: 160000 + max_cost: 209715200 +server_limits: + policy_throttle: 5ms + max_connections: 32000 + checkin_limit: + interval: 500us + burst: 4000 + max: 12501 + artifact_limit: + interval: 500us + burst: 4000 + max: 8000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + 
ack_limit: + interval: 500us + burst: 4000 + max: 8000 \ No newline at end of file diff --git a/internal/pkg/config/defaults/base_limits.yml b/internal/pkg/config/defaults/base_limits.yml new file mode 100644 index 000000000..c47f669f0 --- /dev/null +++ b/internal/pkg/config/defaults/base_limits.yml @@ -0,0 +1,24 @@ +ram: + max: 1024 +cache_limits: + num_counters: 2000 + max_cost: 2097152 +server_limits: + policy_throttle: 200ms + max_connections: 100 + checkin_limit: + interval: 50ms + burst: 25 + max: 100 + artifact_limit: + interval: 100ms + burst: 10 + max: 10 + enroll_limit: + interval: 100ms + burst: 5 + max: 10 + ack_limit: + interval: 10ms + burst: 20 + max: 20 \ No newline at end of file diff --git a/internal/pkg/config/defaults/max_limits.yml b/internal/pkg/config/defaults/max_limits.yml new file mode 100644 index 000000000..394ee9a73 --- /dev/null +++ b/internal/pkg/config/defaults/max_limits.yml @@ -0,0 +1,24 @@ +ram: + min: 16384 +cache_limits: + num_counters: 160000 + max_cost: 209715200 +server_limits: + policy_throttle: 2ms + max_connections: 32000 + checkin_limit: + interval: 500us + burst: 4000 + max: 15001 + artifact_limit: + interval: 500us + burst: 4000 + max: 8000 + enroll_limit: + interval: 10ms + burst: 100 + max: 200 + ack_limit: + interval: 500us + burst: 4000 + max: 8000 \ No newline at end of file diff --git a/internal/pkg/config/env_defaults.go b/internal/pkg/config/env_defaults.go new file mode 100644 index 000000000..87d6b5b2a --- /dev/null +++ b/internal/pkg/config/env_defaults.go @@ -0,0 +1,180 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. 
+ +package config + +import ( + "math" + "runtime" + "strings" + "time" + + "github.com/elastic/beats/v7/x-pack/elastic-agent/pkg/packer" + "github.com/elastic/go-ucfg/yaml" + "github.com/pbnjay/memory" + "github.com/pkg/errors" +) + +const ( + defaultCacheNumCounters = 500000 // 10x times expected count + defaultCacheMaxCost = 50 * 1024 * 1024 // 50MiB cache size + + defaultMaxConnections = 0 // no limit + defaultPolicyThrottle = time.Millisecond * 5 + + defaultCheckinInterval = time.Millisecond + defaultCheckinBurst = 1000 + defaultCheckinMax = 0 + defaultCheckinMaxBody = 1024 * 1024 + + defaultArtifactInterval = time.Millisecond * 5 + defaultArtifactBurst = 25 + defaultArtifactMax = 50 + defaultArtifactMaxBody = 0 + + defaultEnrollInterval = time.Millisecond * 10 + defaultEnrollBurst = 100 + defaultEnrollMax = 50 + defaultEnrollMaxBody = 1024 * 512 + + defaultAckInterval = time.Millisecond * 10 + defaultAckBurst = 100 + defaultAckMax = 50 + defaultAckMaxBody = 1024 * 1024 * 2 +) + +type valueRange struct { + Min int `config:"min"` + Max int `config:"max"` +} + +type envLimits struct { + RAM valueRange `config:"ram"` + Server *serverLimitDefaults `config:"server_limits"` + Cache *cacheLimits `config:"cache_limits"` +} + +func defaultEnvLimits() *envLimits { + return &envLimits{ + RAM: valueRange{ + Min: 0, + Max: int(getMaxInt()), + }, + Server: defaultserverLimitDefaults(), + Cache: defaultCacheLimits(), + } +} + +type cacheLimits struct { + NumCounters int64 `config:"num_counters"` + MaxCost int64 `config:"max_cost"` +} + +func defaultCacheLimits() *cacheLimits { + return &cacheLimits{ + NumCounters: defaultCacheNumCounters, + MaxCost: defaultCacheMaxCost, + } +} + +type limit struct { + Interval time.Duration `config:"interval"` + Burst int `config:"burst"` + Max int64 `config:"max"` + MaxBody int64 `config:"max_body_byte_size"` +} + +type serverLimitDefaults struct { + PolicyThrottle time.Duration `config:"policy_throttle"` + MaxConnections int `config:"max_connections"` + + CheckinLimit limit `config:"checkin_limit"` + ArtifactLimit limit `config:"artifact_limit"` + EnrollLimit limit `config:"enroll_limit"` + AckLimit limit `config:"ack_limit"` +} + +func defaultserverLimitDefaults() *serverLimitDefaults { + return &serverLimitDefaults{ + PolicyThrottle: defaultCacheNumCounters, + MaxConnections: defaultCacheMaxCost, + + CheckinLimit: limit{ + Interval: defaultCheckinInterval, + Burst: defaultCheckinBurst, + Max: defaultCheckinMax, + MaxBody: defaultCheckinMaxBody, + }, + ArtifactLimit: limit{ + Interval: defaultArtifactInterval, + Burst: defaultArtifactBurst, + Max: defaultArtifactMax, + MaxBody: defaultArtifactMaxBody, + }, + EnrollLimit: limit{ + Interval: defaultEnrollInterval, + Burst: defaultEnrollBurst, + Max: defaultEnrollMax, + MaxBody: defaultEnrollMaxBody, + }, + AckLimit: limit{ + Interval: defaultAckInterval, + Burst: defaultAckBurst, + Max: defaultAckMax, + MaxBody: defaultAckMaxBody, + }, + } +} + +var defaults []*envLimits + +func init() { + // Packed Files + // internal/pkg/config/defaults/1024_limits.yml + // internal/pkg/config/defaults/2048_limits.yml + // internal/pkg/config/defaults/4096_limits.yml + // internal/pkg/config/defaults/8192_limits.yml + // internal/pkg/config/defaults/base_limits.yml + // internal/pkg/config/defaults/max_limits.yml + unpacked := 
packer.MustUnpack("eJzsll9vqkgYxu/3Y/R6swUUc9zkXIyOIDQzRoP8u9kwYBEO/1KxMGz2u28GwbZCFZtNerOXTvCZd97nN887fz8ESb57SZzoMfvlP7pp8hz4j97u2TlG+eGR54TxX1EQB/nhDxpHD38+uLGUr/zUV2Q+I2HqIw1QDOrf+x1MfRQCbgWy0DJwauvTg2PwmbdEk3kAfJLouSngV88QOdtUq1UAKIKgQO3/pWlIRpg7rf8aIW1LEcgq21QF21SfiRzljrmp6v2Xs1ciR+FOn3LOUn31lpuDraU+1ma5W7zTE8SjbWDOMaZHt0p9XLX74dQ28ItjiI3uuq5Rmc8ykmwiN7H3BKY+NviqWQ88U608tqaBolnLLdOfKHBbILh4UuazvZtsMjuWQk9qzg5T/+u6iwJBwHQjkqivRC7PtSrsDHPgO6deCpZRTBSoFMRET2xdkVXeTepe8qj5lhjSeBWAstHcW0J+/ezy4Bp/Pvx+nSSBG/+4QVJNzhtJGAIR+4NJ4j4hif8qSegGSUgDrTuhI+uhI0RHUygzYkRcrdtxRz3pzYFvJTp1R+t3DoO3c1fbAq3ZvhL15Ci2DMyddYP22+joyTr1YunAbkuP68zlC6eUO2la9NFUXtaLwtM+loFf/ovzMz0Eft4kasxNJ9eJOhH0RtQKLkQ0nKjxJ0SVCK7HWHMFVrkrMBd02p7crVLW4YIIZWaNomdP3lMy2nBE3k4UqF8nKryXKKmno0q3ozWp9+UT6smnkzMfsoRrKbMNkRKhPNwgtUQ9mYI6maI0fZdChz/X2avXQyjtkr8elFE/+KlwnagTQe+mnWZVK7h+sgSJOfYxV2SRJzqjRq8rPmWGVbTVsQ4y4lrXVppbYo2d/M6J1zjV6pFYjKzRJiOCyCiuzq4NnXgaKLxuV7luVxcUa6Cs776pco5h763R5kambAu01LtTBXbIGn8zWTyCM757u9ad27WCw/KKOIfdp3S195TiO99OV0i6M5uUou3gJySV92XTtm96ULzuf4sMziUN9E+lyzdO7bJ+dOPpgXxNk7985wybcr1Tk17UR4cQEzvlrcf398cPir8tfngE/4+fc/z889u/AQAA///e0qUb") + + for f, v := range unpacked { + cfg, err := yaml.NewConfig(v, DefaultOptions...) + if err != nil { + panic(errors.Wrap(err, "Cannot read spec from "+f)) + } + + l := defaultEnvLimits() + if err := cfg.Unpack(&l, DefaultOptions...); err != nil { + panic(errors.Wrap(err, "Cannot unpack spec from "+f)) + } + + defaults = append(defaults, l) + } +} + +func loadLimits() *envLimits { + ramSize := int(memory.TotalMemory() / 1024 / 1024) + return loadLimitsForRam(ramSize) +} + +func loadLimitsForRam(currentRAM int) *envLimits { + for _, l := range defaults { + // get max possible config for current env + if l.RAM.Min < currentRAM && currentRAM <= l.RAM.Max { + return l + } + } + + return defaultEnvLimits() +} + +func getMaxInt() int64 { + if strings.HasSuffix(runtime.GOARCH, "64") { + return math.MaxInt64 + } + return math.MaxInt32 +} diff --git a/internal/pkg/config/env_defaults_test.go b/internal/pkg/config/env_defaults_test.go new file mode 100644 index 000000000..cd16db9c4 --- /dev/null +++ b/internal/pkg/config/env_defaults_test.go @@ -0,0 +1,36 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +// Code generated by dev-tools/cmd/buildlimits/buildlimits.go - DO NOT EDIT. + +package config + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestLoadLimits(t *testing.T) { + testCases := []struct { + Name string + CurrentRAM int + ExpectedMaxRAM int + }{ + {"low ram", 128, 1024}, + {"512", 512, 1024}, + {"precise", 1024, 1024}, + {"2-to-4", 2650, 4096}, + {"close to max", 16383, 16384}, + {"above max", 16385, int(getMaxInt())}, + } + + for _, tc := range testCases { + t.Run(tc.Name, func(t *testing.T) { + l := loadLimitsForRam(tc.CurrentRAM) + + require.Equal(t, tc.ExpectedMaxRAM, l.RAM.Max) + }) + } +} diff --git a/internal/pkg/config/limits.go b/internal/pkg/config/limits.go index cbe469274..f731cdb7d 100644 --- a/internal/pkg/config/limits.go +++ b/internal/pkg/config/limits.go @@ -28,31 +28,34 @@ type ServerLimits struct { // InitDefaults initializes the defaults for the configuration. 
func (c *ServerLimits) InitDefaults() { + l := loadLimits().Server c.MaxHeaderByteSize = 8192 // 8k - c.MaxConnections = 0 // no limit - c.PolicyThrottle = time.Millisecond * 5 + c.MaxConnections = l.MaxConnections + c.PolicyThrottle = l.PolicyThrottle c.CheckinLimit = Limit{ - Interval: time.Millisecond, - Burst: 1000, - MaxBody: 1024 * 1024, + Interval: l.CheckinLimit.Interval, + Burst: l.CheckinLimit.Burst, + Max: l.CheckinLimit.Max, + MaxBody: l.CheckinLimit.MaxBody, } c.ArtifactLimit = Limit{ - Interval: time.Millisecond * 5, - Burst: 25, - Max: 50, + Interval: l.ArtifactLimit.Interval, + Burst: l.ArtifactLimit.Burst, + Max: l.ArtifactLimit.Max, + MaxBody: l.ArtifactLimit.MaxBody, } c.EnrollLimit = Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 1024 * 512, + Interval: l.EnrollLimit.Interval, + Burst: l.EnrollLimit.Burst, + Max: l.EnrollLimit.Max, + MaxBody: l.EnrollLimit.MaxBody, } c.AckLimit = Limit{ - Interval: time.Millisecond * 10, - Burst: 100, - Max: 50, - MaxBody: 1024 * 1024 * 2, + Interval: l.AckLimit.Interval, + Burst: l.AckLimit.Burst, + Max: l.AckLimit.Max, + MaxBody: l.AckLimit.MaxBody, } } From f6edadf2eff25b97be2310b7e8d2f66bc902f257 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 19 Oct 2021 01:24:13 -0400 Subject: [PATCH 207/240] [Automation] Update elastic stack version to 7.16.0-72891008 for testing (#790) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 9d6549311..011bcaf18 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-8bf0b9b1-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-72891008-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 0d8ffe5e6d2bd3c6baedb41e76c72e28fc255ece Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 20 Oct 2021 01:19:57 -0400 Subject: [PATCH 208/240] [Automation] Update elastic stack version to 7.16.0-738085ba for testing (#793) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 011bcaf18..911acad7c 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-72891008-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-738085ba-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From c3642b0b6b07654295f5e94249e2ead967dbed95 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Thu, 21 Oct 2021 10:42:24 -0400 Subject: [PATCH 209/240] Change agentId field to a value that elastic will map. --- cmd/fleet/handleAck.go | 1 + cmd/fleet/handleArtifacts.go | 2 +- cmd/fleet/handleCheckin.go | 4 +--- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index c19e1a97e..2c43c138d 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -64,6 +64,7 @@ func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httproute log.WithLevel(resp.Level). Err(err). + Str("agentId", id). Str(EcsHttpRequestId, reqId). Int(EcsHttpResponseCode, resp.StatusCode). 
Int64(EcsEventDuration, time.Since(start).Nanoseconds()). diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 91e778945..87df90583 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -74,7 +74,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http reqId := r.Header.Get(logger.HeaderRequestID) zlog := log.With(). - Str("id", id). + Str("agentId", id). Str("sha2", sha2). Str("remoteAddr", r.RemoteAddr). Str(EcsHttpRequestId, reqId). diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index af8c6a7eb..22763a5d3 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -74,11 +74,9 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro reqId := r.Header.Get(logger.HeaderRequestID) - log.WithLevel(resp.Level). + zlog.WithLevel(resp.Level). Err(err). - Str("id", id). Int(EcsHttpResponseCode, resp.StatusCode). - Str(EcsHttpRequestId, reqId). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("fail checkin") From aa182317ec178eee888bc17ab5fa258e63dfe880 Mon Sep 17 00:00:00 2001 From: Aleksandr Maus Date: Thu, 21 Oct 2021 20:38:49 -0400 Subject: [PATCH 210/240] Add action_response field into .fleet-actions-results (#796) (#800) --- cmd/fleet/handleAck.go | 15 ++++++++------- cmd/fleet/schema.go | 29 +++++++++++++++-------------- internal/pkg/es/mapping.go | 11 +++++++++++ internal/pkg/model/schema.go | 7 +++++++ model/schema.json | 5 +++++ 5 files changed, 46 insertions(+), 21 deletions(-) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 2c43c138d..79b88bc4c 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -163,13 +163,14 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events } acr := model.ActionResult{ - ActionId: ev.ActionId, - AgentId: agent.Id, - StartedAt: ev.StartedAt, - CompletedAt: ev.CompletedAt, - ActionData: ev.ActionData, - Data: ev.Data, - Error: ev.Error, + ActionId: ev.ActionId, + AgentId: agent.Id, + StartedAt: ev.StartedAt, + CompletedAt: ev.CompletedAt, + ActionData: ev.ActionData, + ActionResponse: ev.ActionResponse, + Data: ev.Data, + Error: ev.Error, } if _, err := dl.CreateActionResult(ctx, ack.bulk, acr); err != nil { return errors.Wrap(err, "create action result") diff --git a/cmd/fleet/schema.go b/cmd/fleet/schema.go index f8f300b46..c2b03bc5e 100644 --- a/cmd/fleet/schema.go +++ b/cmd/fleet/schema.go @@ -102,20 +102,21 @@ type ActionResp struct { } type Event struct { - Type string `json:"type"` - SubType string `json:"subtype"` - AgentId string `json:"agent_id"` - ActionId string `json:"action_id"` - PolicyId string `json:"policy_id"` - StreamId string `json:"stream_id"` - Timestamp string `json:"timestamp"` - Message string `json:"message"` - Payload json.RawMessage `json:"payload,omitempty"` - StartedAt string `json:"started_at"` - CompletedAt string `json:"completed_at"` - ActionData json.RawMessage `json:"action_data,omitempty"` - Data json.RawMessage `json:"data,omitempty"` - Error string `json:"error,omitempty"` + Type string `json:"type"` + SubType string `json:"subtype"` + AgentId string `json:"agent_id"` + ActionId string `json:"action_id"` + PolicyId string `json:"policy_id"` + StreamId string `json:"stream_id"` + Timestamp string `json:"timestamp"` + Message string `json:"message"` + Payload json.RawMessage `json:"payload,omitempty"` + StartedAt string `json:"started_at"` + CompletedAt string `json:"completed_at"` + ActionData 
json.RawMessage `json:"action_data,omitempty"` + ActionResponse json.RawMessage `json:"action_response,omitempty"` + Data json.RawMessage `json:"data,omitempty"` + Error string `json:"error,omitempty"` } type StatusResponse struct { diff --git a/internal/pkg/es/mapping.go b/internal/pkg/es/mapping.go index ccc0a6b51..deb9a52d9 100644 --- a/internal/pkg/es/mapping.go +++ b/internal/pkg/es/mapping.go @@ -46,6 +46,13 @@ const ( MappingActionData = `{ "properties": { + } +}` + + // ActionResponse The custom action response payload. + MappingActionResponse = `{ + "properties": { + } }` @@ -59,6 +66,10 @@ const ( "action_id": { "type": "keyword" }, + "action_response": { + "enabled" : false, + "type": "object" + }, "agent_id": { "type": "keyword" }, diff --git a/internal/pkg/model/schema.go b/internal/pkg/model/schema.go index 58728bae9..c1cf8160a 100644 --- a/internal/pkg/model/schema.go +++ b/internal/pkg/model/schema.go @@ -65,6 +65,10 @@ type Action struct { type ActionData struct { } +// ActionResponse The custom action response payload. +type ActionResponse struct { +} + // ActionResult An Elastic Agent action results type ActionResult struct { ESDocument @@ -75,6 +79,9 @@ type ActionResult struct { // The action id. ActionId string `json:"action_id,omitempty"` + // The custom action response payload. + ActionResponse json.RawMessage `json:"action_response,omitempty"` + // The agent id. AgentId string `json:"agent_id,omitempty"` diff --git a/model/schema.json b/model/schema.json index eaf3e872c..1af57c407 100644 --- a/model/schema.json +++ b/model/schema.json @@ -97,6 +97,11 @@ "type": "object", "format": "raw" }, + "action_response": { + "description": "The custom action response payload.", + "type": "object", + "format": "raw" + }, "error": { "description": "The action error message.", "type": "string" From 6d167f40eafc8d0214d695602c804315b9c8a764 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 22 Oct 2021 01:21:12 -0400 Subject: [PATCH 211/240] [Automation] Update elastic stack version to 7.16.0-b54dfa68 for testing (#803) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 911acad7c..d16791fbd 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-738085ba-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-b54dfa68-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From d9e967e57a79e1e8b1db11ce6f89eb3bc1ac0b25 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 26 Oct 2021 01:23:10 -0400 Subject: [PATCH 212/240] [Automation] Update elastic stack version to 7.16.0-ee63131c for testing (#808) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index d16791fbd..cd60dd2fe 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-b54dfa68-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-ee63131c-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 7a5b05d6eae32ab580c965d37ec79cc3916a051d Mon Sep 17 00:00:00 2001 From: apmmachine 
<58790750+apmmachine@users.noreply.github.com> Date: Wed, 27 Oct 2021 01:23:42 -0400 Subject: [PATCH 213/240] [Automation] Update elastic stack version to 7.16.0-a0af8f2a for testing (#811) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index cd60dd2fe..3203e0ff3 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-ee63131c-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-a0af8f2a-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 9fb8526a5f8f78d6af79f52052314dc4a30f4ea5 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 1 Nov 2021 01:21:01 -0400 Subject: [PATCH 214/240] [Automation] Update elastic stack version to 7.16.0-66ccea1a for testing (#820) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3203e0ff3..e5cbaa492 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-a0af8f2a-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-66ccea1a-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From d481d067c01575fab4a8fcaba86fe79eeb166b8f Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Mon, 1 Nov 2021 14:54:19 -0400 Subject: [PATCH 215/240] Migrate agent.id field from 7.14 to 7.15+ --- cmd/fleet/main.go | 5 ++ internal/pkg/checkin/bulk.go | 4 +- internal/pkg/dl/constants.go | 3 +- internal/pkg/dl/migration.go | 130 +++++++++++++++++++++++++++++++++++ 4 files changed, 140 insertions(+), 2 deletions(-) create mode 100644 internal/pkg/dl/migration.go diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index ebb33cd68..18f303ffb 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -767,6 +767,11 @@ func (f *FleetServer) runSubsystems(ctx context.Context, cfg *config.Config, g * return fmt.Errorf("failed version compatibility check with elasticsearch: %w", err) } + // Run migrations; current safe to do in background. That may change in the future. 
+ g.Go(loggedRunFunc(ctx, "Migrations", func(ctx context.Context) error { + return dl.Migrate(ctx, bulker) + })) + // Monitoring es client, longer timeout, no retries monCli, err := es.NewClient(ctx, cfg, true, es.WithUserAgent(kUAFleetServer, f.bi)) if err != nil { diff --git a/internal/pkg/checkin/bulk.go b/internal/pkg/checkin/bulk.go index 17a2c6b93..0d070e409 100644 --- a/internal/pkg/checkin/bulk.go +++ b/internal/pkg/checkin/bulk.go @@ -197,7 +197,9 @@ func (bc *Bulk) flush(ctx context.Context) error { // If the agent version is not empty it needs to be updated // Assuming the agent can by upgraded keeping the same id, but incrementing the version if pendingData.extra.ver != "" { - fields[dl.FieldAgentVersion] = pendingData.extra.ver + fields[dl.FieldAgent] = map[string]interface{}{ + dl.FieldAgentVersion: pendingData.extra.ver, + } } // Update local metadata if provided diff --git a/internal/pkg/dl/constants.go b/internal/pkg/dl/constants.go index 725fad1aa..882a09d37 100644 --- a/internal/pkg/dl/constants.go +++ b/internal/pkg/dl/constants.go @@ -40,7 +40,8 @@ const ( FieldDefaultApiKeyId = "default_api_key_id" FieldPolicyOutputPermissionsHash = "policy_output_permissions_hash" FieldUnenrolledReason = "unenrolled_reason" - FieldAgentVersion = "agent.version" + FieldAgentVersion = "version" + FieldAgent = "agent" FieldActive = "active" FieldUpdatedAt = "updated_at" diff --git a/internal/pkg/dl/migration.go b/internal/pkg/dl/migration.go new file mode 100644 index 000000000..73f854a86 --- /dev/null +++ b/internal/pkg/dl/migration.go @@ -0,0 +1,130 @@ +// Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one +// or more contributor license agreements. Licensed under the Elastic License; +// you may not use this file except in compliance with the Elastic License. + +package dl + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "time" + + "github.com/elastic/fleet-server/v7/internal/pkg/bulk" + "github.com/elastic/fleet-server/v7/internal/pkg/dsl" + + "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/pkg/errors" + "github.com/rs/zerolog/log" +) + +func Migrate(ctx context.Context, bulker bulk.Bulk) error { + return migrateAgentMetadata(ctx, bulker) +} + +// FleetServer 7.15 added a new *AgentMetadata field to the Agent record. +// This field was populated in new enrollments in 7.15 and later; however, the +// change was not backported to support 7.14. The security team is reliant on the +// existence of this field in 7.16, so the following migration was added to +// support upgrade from 7.14. +// +// It is currently safe to run this in the background; albeit with some +// concern on conflicts. The conflict risk exists regardless as N Fleet Servers +// can be run in parallel at the same time. +// +// As the update only occurs once, the 99.9% case is a noop. 
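+//
+// As an illustration only (the id and fields below are made up), the
+// update-by-query issued here rewrites a 7.14-era agent document such as
+//
+//	{"_id": "1234-abcd", "_source": {"active": true}}
+//
+// into
+//
+//	{"_id": "1234-abcd", "_source": {"active": true, "agent": {"id": "1234-abcd"}}}
+//
+// by copying the Elasticsearch document _id into agent.id for any record that
+// does not already have agent.id set.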
+func migrateAgentMetadata(ctx context.Context, bulker bulk.Bulk) error { + + root := dsl.NewRoot() + root.Query().Bool().MustNot().Exists("agent.id") + + painless := "ctx._source.agent = [:]; ctx._source.agent.id = ctx._id;" + root.Param("script", painless) + + body, err := root.MarshalJSON() + if err != nil { + return err + } + +LOOP: + for { + nConflicts, err := updateAgentMetadata(ctx, bulker, body) + if err != nil { + return err + } + if nConflicts == 0 { + break LOOP + } + + time.Sleep(time.Second) + } + + return nil +} + +func updateAgentMetadata(ctx context.Context, bulker bulk.Bulk, body []byte) (int, error) { + start := time.Now() + + client := bulker.Client() + + reader := bytes.NewReader(body) + + opts := []func(*esapi.UpdateByQueryRequest){ + client.UpdateByQuery.WithBody(reader), + client.UpdateByQuery.WithContext(ctx), + client.UpdateByQuery.WithRefresh(true), + client.UpdateByQuery.WithConflicts("proceed"), + } + + res, err := client.UpdateByQuery([]string{FleetAgents}, opts...) + + if err != nil { + return 0, err + } + + if res.IsError() { + return 0, fmt.Errorf("Migrate UpdateByQuery %s", res.String()) + } + + resp := struct { + Took int `json:"took"` + TimedOut bool `json:"timed_out"` + Total int `json:"total"` + Updated int `json:"updated"` + Deleted int `json:"deleted"` + Batches int `json:"batches"` + VersionConflicts int `json:"version_conflicts"` + Noops int `json:"noops"` + Retries struct { + Bulk int `json:"bulk"` + Search int `json:"search"` + } `json:"retries"` + Failures []json.RawMessage `json:"failures"` + }{} + + decoder := json.NewDecoder(res.Body) + if err := decoder.Decode(&resp); err != nil { + return 0, errors.Wrap(err, "decode UpdateByQuery response") + } + + log.Info(). + Int("took", resp.Took). + Bool("timed_out", resp.TimedOut). + Int("total", resp.Total). + Int("updated", resp.Updated). + Int("deleted", resp.Deleted). + Int("batches", resp.Batches). + Int("version_conflicts", resp.VersionConflicts). + Int("noops", resp.Noops). + Int("retries.bulk", resp.Retries.Bulk). + Int("retries.search", resp.Retries.Search). + Dur("rtt", time.Since(start)). + Msg("migrate agent records response") + + for _, fail := range resp.Failures { + log.Error().RawJSON("failure", fail).Msg("migration failure") + } + + return resp.VersionConflicts, err +} From bfadd090c3e140f714dadfdac2a58f8df15611c2 Mon Sep 17 00:00:00 2001 From: Sean Cunningham Date: Tue, 2 Nov 2021 12:33:26 -0400 Subject: [PATCH 216/240] Handle 404 on .fleet-agent index as a noop during migration. 
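
When the migration runs against a cluster where the agents index has not been
created yet, the UpdateByQuery request comes back as a 404. There is nothing to
migrate in that case, so the 404 is now treated as a successful no-op rather
than an error. The guard added below amounts to:

	if res.IsError() {
		if res.StatusCode == http.StatusNotFound {
			// Ignore index not created yet; nothing to upgrade
			return 0, nil
		}
		return 0, fmt.Errorf("Migrate UpdateByQuery %s", res.String())
	}
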
--- internal/pkg/dl/migration.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/internal/pkg/dl/migration.go b/internal/pkg/dl/migration.go index 73f854a86..4beb26741 100644 --- a/internal/pkg/dl/migration.go +++ b/internal/pkg/dl/migration.go @@ -9,6 +9,7 @@ import ( "context" "encoding/json" "fmt" + "net/http" "time" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" @@ -84,6 +85,11 @@ func updateAgentMetadata(ctx context.Context, bulker bulk.Bulk, body []byte) (in } if res.IsError() { + if res.StatusCode == http.StatusNotFound { + // Ignore index not created yet; nothing to upgrade + return 0, nil + } + return 0, fmt.Errorf("Migrate UpdateByQuery %s", res.String()) } From 024642a7157dda76bc28059db3a16230b3ecfa78 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 3 Nov 2021 01:24:20 -0400 Subject: [PATCH 217/240] [Automation] Update elastic stack version to 7.16.0-c5c99751 for testing (#830) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index e5cbaa492..3dd5496fe 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-66ccea1a-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-c5c99751-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f73abc8f60f00a67734a4827ed19234fc02daa13 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 4 Nov 2021 01:22:52 -0400 Subject: [PATCH 218/240] [Automation] Update elastic stack version to 7.16.0-c51d3b71 for testing (#835) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 3dd5496fe..0d720f255 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-c5c99751-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-c51d3b71-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 3977d6b073612bc2427664a0fa0f05f533a2a75a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 5 Nov 2021 01:23:11 -0400 Subject: [PATCH 219/240] [Automation] Update elastic stack version to 7.16.0-68781827 for testing (#840) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 0d720f255..de9420042 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-c51d3b71-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-68781827-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 70d50d354fbed05e3a0ec88357c789e3f82dff13 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 10 Nov 2021 00:22:03 -0500 Subject: [PATCH 220/240] [Automation] Update elastic stack version to 7.16.0-df026734 for testing (#851) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env 
index de9420042..d812e5353 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-68781827-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-df026734-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 765adde495f4bff551f096e91b285d47271a2ff6 Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Thu, 11 Nov 2021 08:21:09 +0000 Subject: [PATCH 221/240] Normalize logging (#859) (cherry picked from commit 8a4855bc5687cbc859dc5e82278366509ecf677e) Co-authored-by: Sean Cunningham --- cmd/fleet/auth.go | 53 ++++++--- cmd/fleet/error.go | 9 +- cmd/fleet/handleAck.go | 104 ++++++++++++----- cmd/fleet/handleArtifacts.go | 24 ++-- cmd/fleet/handleCheckin.go | 44 +++++--- cmd/fleet/handleEnroll.go | 167 +++++++++++++++++++++------- cmd/fleet/server.go | 2 +- cmd/fleet/userAgent.go | 24 +++- cmd/fleet/userAgent_test.go | 3 +- internal/pkg/action/dispatcher.go | 7 +- internal/pkg/apikey/get.go | 4 +- internal/pkg/config/input.go | 20 ++-- internal/pkg/coordinator/monitor.go | 47 ++++++-- internal/pkg/coordinator/v0.go | 3 +- internal/pkg/es/client.go | 24 ++-- internal/pkg/logger/ecs.go | 23 +++- internal/pkg/logger/http.go | 162 +++++++++++++++++++-------- internal/pkg/policy/monitor.go | 27 ++--- 18 files changed, 533 insertions(+), 214 deletions(-) diff --git a/cmd/fleet/auth.go b/cmd/fleet/auth.go index ede3f0d71..8c6a53fd5 100644 --- a/cmd/fleet/auth.go +++ b/cmd/fleet/auth.go @@ -22,6 +22,7 @@ var ( ErrApiKeyNotEnabled = errors.New("APIKey not enabled") ErrAgentCorrupted = errors.New("agent record corrupted") ErrAgentInactive = errors.New("agent inactive") + ErrAgentIdentity = errors.New("agent header contains wrong identifier") ) // This authenticates that the provided API key exists and is enabled. @@ -47,7 +48,7 @@ func authApiKey(r *http.Request, bulker bulk.Bulk, c cache.Cache) (*apikey.ApiKe if err != nil { log.Info(). Err(err). - Str("id", key.Id). + Str(LogApiKeyId, key.Id). Str(EcsHttpRequestId, reqId). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("ApiKey fail authentication") @@ -78,7 +79,7 @@ func authApiKey(r *http.Request, bulker bulk.Bulk, c cache.Cache) (*apikey.ApiKe return key, err } -func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) { +func authAgent(r *http.Request, id *string, bulker bulk.Bulk, c cache.Cache) (*model.Agent, error) { start := time.Now() // authenticate @@ -87,12 +88,20 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo return nil, err } + w := log.With(). + Str(LogAccessApiKeyId, key.Id). + Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)) + + if id != nil { + w = w.Str(LogAgentId, *id) + } + + zlog := w.Logger() + authTime := time.Now() if authTime.Sub(start) > time.Second { - log.Debug(). - Str("agentId", id). - Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). + zlog.Debug(). Int64(EcsEventDuration, authTime.Sub(start).Nanoseconds()). Msg("authApiKey slow") } @@ -102,33 +111,45 @@ func authAgent(r *http.Request, id string, bulker bulk.Bulk, c cache.Cache) (*mo return nil, err } + if agent.Agent == nil { + zlog.Warn(). + Err(ErrAgentCorrupted). + Msg("agent record does not contain required metadata section") + return nil, ErrAgentCorrupted + } + findTime := time.Now() if findTime.Sub(authTime) > time.Second { - log.Debug(). - Str("agentId", id). 
- Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). + zlog.Debug(). Int64(EcsEventDuration, findTime.Sub(authTime).Nanoseconds()). Msg("findAgentByApiKeyId slow") } - // validate key alignment + // validate that the Access ApiKey identifier stored in the agent's record + // is in alignment when the authenticated key provided on this transaction if agent.AccessApiKeyId != key.Id { - log.Info(). + zlog.Warn(). Err(ErrAgentCorrupted). - Interface("agent", &agent). - Str("key.Id", key.Id). - Msg("agent API key id mismatch agent record") + Str("agent.AccessApiKeyId", agent.AccessApiKeyId). + Msg("agent access ApiKey id mismatch agent record") return nil, ErrAgentCorrupted } + // validate that the id in the header is equal to the agent id record + if id != nil && *id != agent.Agent.Id { + zlog.Warn(). + Err(ErrAgentIdentity). + Str("agent.Agent.Id", agent.Agent.Id). + Msg("agent id mismatch against http header") + return nil, ErrAgentIdentity + } + // validate active, an api key can be valid for an inactive agent record // if it is in our cache and has not timed out. if !agent.Active { - log.Info(). + zlog.Info(). Err(ErrAgentInactive). - Str("agentId", id). - Str(EcsHttpRequestId, r.Header.Get(logger.HeaderRequestID)). Msg("agent record inactive") // Update the cache to mark the api key id associated with this agent as not enabled diff --git a/cmd/fleet/error.go b/cmd/fleet/error.go index 201b53b25..375521516 100644 --- a/cmd/fleet/error.go +++ b/cmd/fleet/error.go @@ -18,12 +18,19 @@ import ( "github.com/rs/zerolog" ) -// Alias useful ECS headers +// Alias logger constants const ( EcsHttpRequestId = logger.EcsHttpRequestId EcsEventDuration = logger.EcsEventDuration EcsHttpResponseCode = logger.EcsHttpResponseCode EcsHttpResponseBodyBytes = logger.EcsHttpResponseBodyBytes + + LogApiKeyId = logger.ApiKeyId + LogPolicyId = logger.PolicyId + LogAgentId = logger.AgentId + LogEnrollApiKeyId = logger.EnrollApiKeyId + LogAccessApiKeyId = logger.AccessApiKeyId + LogDefaultOutputApiKeyId = logger.DefaultOutputApiKeyId ) type errResp struct { diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index 79b88bc4c..ae0dccd5e 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -14,7 +14,6 @@ import ( "strings" "time" - "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/cache" "github.com/elastic/fleet-server/v7/internal/pkg/config" @@ -26,6 +25,7 @@ import ( "github.com/pkg/errors" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -55,43 +55,58 @@ func (rt Router) handleAcks(w http.ResponseWriter, r *http.Request, ps httproute start := time.Now() id := ps.ByName("id") - err := rt.ack.handleAcks(w, r, id) + + reqId := r.Header.Get(logger.HeaderRequestID) + + zlog := log.With(). + Str(LogAgentId, id). + Str(EcsHttpRequestId, reqId). + Logger() + + err := rt.ack.handleAcks(&zlog, w, r, id) if err != nil { cntAcks.IncError(err) resp := NewErrorResp(err) - reqId := r.Header.Get(logger.HeaderRequestID) - log.WithLevel(resp.Level). + zlog.WithLevel(resp.Level). Err(err). - Str("agentId", id). - Str(EcsHttpRequestId, reqId). Int(EcsHttpResponseCode, resp.StatusCode). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). 
Msg("fail ACK") if err := resp.Write(w); err != nil { - log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail writing error response") + zlog.Error().Err(err).Msg("fail writing error response") } } } -func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) error { +func (ack *AckT) handleAcks(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request, id string) error { limitF, err := ack.limit.Acquire() if err != nil { return err } defer limitF() - agent, err := authAgent(r, id, ack.bulk, ack.cache) + agent, err := authAgent(r, &id, ack.bulk, ack.cache) if err != nil { return err } + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + // Metrics; serenity now. dfunc := cntAcks.IncStart() defer dfunc() + return ack.processRequest(*zlog, w, r, agent) +} + +func (ack *AckT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, agent *model.Agent) error { + body := r.Body // Limit the size of the body to prevent malicious agent from exhausting RAM in server @@ -111,9 +126,11 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er return errors.Wrap(err, "handleAcks unmarshal") } - log.Trace().RawJSON("raw", raw).Msg("Ack request") + zlog.Trace().RawJSON("raw", raw).Msg("Ack request") + + zlog = zlog.With().Int("nEvents", len(req.Events)).Logger() - if err = ack.handleAckEvents(r.Context(), agent, req.Events); err != nil { + if err = ack.handleAckEvents(r.Context(), zlog, agent, req.Events); err != nil { return err } @@ -134,10 +151,18 @@ func (ack AckT) handleAcks(w http.ResponseWriter, r *http.Request, id string) er return nil } -func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events []Event) error { +func (ack *AckT) handleAckEvents(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, events []Event) error { var policyAcks []string var unenroll bool - for _, ev := range events { + for n, ev := range events { + zlog.Info(). + Str("actionType", ev.Type). + Str("actionSubType", ev.SubType). + Str("actionId", ev.ActionId). + Str("timestamp", ev.Timestamp). + Int("n", n). 
+ Msg("ack event") + if ev.AgentId != "" && ev.AgentId != agent.Id { return ErrEventAgentIdMismatch } @@ -180,7 +205,7 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events if action.Type == TypeUnenroll { unenroll = true } else if action.Type == TypeUpgrade { - if err := ack.handleUpgrade(ctx, agent); err != nil { + if err := ack.handleUpgrade(ctx, zlog, agent); err != nil { return err } } @@ -188,13 +213,13 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events } if len(policyAcks) > 0 { - if err := ack.handlePolicyChange(ctx, agent, policyAcks...); err != nil { + if err := ack.handlePolicyChange(ctx, zlog, agent, policyAcks...); err != nil { return err } } if unenroll { - if err := ack.handleUnenroll(ctx, agent); err != nil { + if err := ack.handleUnenroll(ctx, zlog, agent); err != nil { return err } } @@ -202,7 +227,7 @@ func (ack *AckT) handleAckEvents(ctx context.Context, agent *model.Agent, events return nil } -func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, actionIds ...string) error { +func (ack *AckT) handlePolicyChange(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, actionIds ...string) error { // If more than one, pick the winner; // 0) Correct policy id // 1) Highest revision/coordinator number @@ -212,6 +237,16 @@ func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, act currCoord := agent.PolicyCoordinatorIdx for _, a := range actionIds { rev, ok := policy.RevisionFromString(a) + + zlog.Debug(). + Str("agent.policyId", agent.PolicyId). + Int64("agent.revisionIdx", currRev). + Int64("agent.coordinatorIdx", currCoord). + Str("rev.policyId", rev.PolicyId). + Int64("rev.revisionIdx", rev.RevisionIdx). + Int64("rev.coordinatorIdx", rev.CoordinatorIdx). + Msg("ack policy revision") + if ok && rev.PolicyId == agent.PolicyId && (rev.RevisionIdx > currRev || (rev.RevisionIdx == currRev && rev.CoordinatorIdx > currCoord)) { found = true @@ -239,20 +274,21 @@ func (ack *AckT) handlePolicyChange(ctx context.Context, agent *model.Agent, act bulk.WithRetryOnConflict(3), ) - log.Info().Err(err). - Str("agentId", agent.Id). - Str("policyId", agent.PolicyId). + zlog.Info().Err(err). + Str(LogPolicyId, agent.PolicyId). Int64("policyRevision", currRev). Int64("policyCoordinator", currCoord). 
- Msg("Policy ACK") + Msg("ack policy") return errors.Wrap(err, "handlePolicyChange update") } -func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { +func (ack *AckT) handleUnenroll(ctx context.Context, zlog zerolog.Logger, agent *model.Agent) error { apiKeys := _getAPIKeyIDs(agent) if len(apiKeys) > 0 { - if err := apikey.Invalidate(ctx, ack.bulk.Client(), apiKeys...); err != nil { + zlog = zlog.With().Strs(LogApiKeyId, apiKeys).Logger() + + if err := ack.bulk.ApiKeyInvalidate(ctx, apiKeys...); err != nil { return errors.Wrap(err, "handleUnenroll invalidate apikey") } } @@ -269,11 +305,15 @@ func (ack *AckT) handleUnenroll(ctx context.Context, agent *model.Agent) error { return errors.Wrap(err, "handleUnenroll marshal") } - err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) - return errors.Wrap(err, "handleUnenroll update") + if err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()); err != nil { + return errors.Wrap(err, "handleUnenroll update") + } + + zlog.Info().Msg("ack unenroll") + return nil } -func (ack *AckT) handleUpgrade(ctx context.Context, agent *model.Agent) error { +func (ack *AckT) handleUpgrade(ctx context.Context, zlog zerolog.Logger, agent *model.Agent) error { now := time.Now().UTC().Format(time.RFC3339) doc := bulk.UpdateFields{ @@ -286,8 +326,16 @@ func (ack *AckT) handleUpgrade(ctx context.Context, agent *model.Agent) error { return errors.Wrap(err, "handleUpgrade marshal") } - err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()) - return errors.Wrap(err, "handleUpgrade update") + if err = ack.bulk.Update(ctx, dl.FleetAgents, agent.Id, body, bulk.WithRefresh()); err != nil { + return errors.Wrap(err, "handleUpgrade update") + } + + zlog.Info(). + Str("lastReportedVersion", agent.Agent.Version). + Str("upgradedAt", now). + Msg("ack upgrade") + + return nil } func _getAPIKeyIDs(agent *model.Agent) []string { diff --git a/cmd/fleet/handleArtifacts.go b/cmd/fleet/handleArtifacts.go index 87df90583..64f8b9f7d 100644 --- a/cmd/fleet/handleArtifacts.go +++ b/cmd/fleet/handleArtifacts.go @@ -74,13 +74,13 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http reqId := r.Header.Get(logger.HeaderRequestID) zlog := log.With(). - Str("agentId", id). + Str(LogAgentId, id). + Str(EcsHttpRequestId, reqId). Str("sha2", sha2). Str("remoteAddr", r.RemoteAddr). - Str(EcsHttpRequestId, reqId). Logger() - rdr, err := rt.at.handleArtifacts(r, zlog, id, sha2) + rdr, err := rt.at.handleArtifacts(&zlog, r, id, sha2) var nWritten int64 if err == nil { @@ -111,7 +111,7 @@ func (rt Router) handleArtifacts(w http.ResponseWriter, r *http.Request, ps http } } -func (at ArtifactT) handleArtifacts(r *http.Request, zlog zerolog.Logger, id, sha2 string) (io.Reader, error) { +func (at ArtifactT) handleArtifacts(zlog *zerolog.Logger, r *http.Request, id, sha2 string) (io.Reader, error) { limitF, err := at.limit.Acquire() if err != nil { return nil, err @@ -121,21 +121,21 @@ func (at ArtifactT) handleArtifacts(r *http.Request, zlog zerolog.Logger, id, sh // Authenticate the APIKey; retrieve agent record. // Note: This is going to be a bit slow even if we hit the cache on the api key. // In order to validate that the agent still has that api key, we fetch the agent record from elastic. 
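+	// nil is passed for the id below: the updated authAgent only enforces the
+	// id-versus-agent-record check when a non-nil id pointer is supplied.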
- agent, err := authAgent(r, "", at.bulker, at.cache) + agent, err := authAgent(r, nil, at.bulker, at.cache) if err != nil { return nil, err } + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + // Metrics; serenity now. dfunc := cntArtifacts.IncStart() defer dfunc() - zlog = zlog.With(). - Str("APIKeyId", agent.AccessApiKeyId). - Str("agentId", agent.Id). - Logger() - - return at.handle(r.Context(), zlog, agent, id, sha2) + return at.processRequest(r.Context(), *zlog, agent, id, sha2) } type artHandler struct { @@ -144,7 +144,7 @@ type artHandler struct { c cache.Cache } -func (at ArtifactT) handle(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, id, sha2 string) (io.Reader, error) { +func (at ArtifactT) processRequest(ctx context.Context, zlog zerolog.Logger, agent *model.Agent, id, sha2 string) (io.Reader, error) { // Input validation if err := validateSha2String(sha2); err != nil { diff --git a/cmd/fleet/handleCheckin.go b/cmd/fleet/handleCheckin.go index 22763a5d3..86b628298 100644 --- a/cmd/fleet/handleCheckin.go +++ b/cmd/fleet/handleCheckin.go @@ -56,11 +56,11 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro reqId := r.Header.Get(logger.HeaderRequestID) zlog := log.With(). - Str("agentId", id). + Str(LogAgentId, id). Str(EcsHttpRequestId, reqId). Logger() - err := rt.ct._handleCheckin(zlog, w, r, id, rt.bulker) + err := rt.ct.handleCheckin(&zlog, w, r, id) if err != nil { cntCheckin.IncError(err) @@ -72,8 +72,6 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro resp.Level = zerolog.WarnLevel } - reqId := r.Header.Get(logger.HeaderRequestID) - zlog.WithLevel(resp.Level). Err(err). Int(EcsHttpResponseCode, resp.StatusCode). 
@@ -81,7 +79,7 @@ func (rt Router) handleCheckin(w http.ResponseWriter, r *http.Request, ps httpro Msg("fail checkin") if err := resp.Write(w); err != nil { - log.Error().Str(EcsHttpRequestId, reqId).Err(err).Msg("fail writing error response") + zlog.Error().Err(err).Msg("fail writing error response") } } } @@ -134,7 +132,7 @@ func NewCheckinT( return ct } -func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, id string, bulker bulk.Bulk) error { +func (ct *CheckinT) handleCheckin(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request, id string) error { start := time.Now() @@ -144,13 +142,18 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r } defer limitF() - agent, err := authAgent(r, id, ct.bulker, ct.cache) + agent, err := authAgent(r, &id, ct.bulker, ct.cache) if err != nil { return err } - ver, err := validateUserAgent(r, ct.verCon) + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogAccessApiKeyId, agent.AccessApiKeyId) + }) + + ver, err := validateUserAgent(*zlog, r, ct.verCon) if err != nil { return err } @@ -162,6 +165,11 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r dfunc := cntCheckin.IncStart() defer dfunc() + return ct.processRequest(*zlog, w, r, start, agent, newVer) +} + +func (ct *CheckinT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, start time.Time, agent *model.Agent, ver string) error { + ctx := r.Context() body := r.Body @@ -176,7 +184,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r var req CheckinRequest decoder := json.NewDecoder(readCounter) if err := decoder.Decode(&req); err != nil { - return err + return errors.Wrap(err, "decode checkin request") } cntCheckin.bodyIn.Add(readCounter.Count()) @@ -188,7 +196,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r } // Resolve AckToken from request, fallback on the agent record - seqno, err := ct.resolveSeqNo(ctx, req, agent) + seqno, err := ct.resolveSeqNo(ctx, zlog, req, agent) if err != nil { return err } @@ -226,7 +234,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r defer longPoll.Stop() // Intial update on checkin, and any user fields that might have changed - ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno, newVer) + ct.bc.CheckIn(agent.Id, req.Status, rawMeta, seqno, ver) // Initial fetch for pending actions var ( @@ -253,7 +261,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r actions = append(actions, acs...) 
break LOOP case policy := <-sub.Output(): - actionResp, err := processPolicy(ctx, zlog, bulker, agent.Id, policy) + actionResp, err := processPolicy(ctx, zlog, ct.bulker, agent.Id, policy) if err != nil { return errors.Wrap(err, "processPolicy") } @@ -263,7 +271,7 @@ func (ct *CheckinT) _handleCheckin(zlog zerolog.Logger, w http.ResponseWriter, r zlog.Trace().Msg("fire long poll") break LOOP case <-tick.C: - ct.bc.CheckIn(agent.Id, req.Status, nil, nil, newVer) + ct.bc.CheckIn(agent.Id, req.Status, nil, nil, ver) } } } @@ -348,7 +356,7 @@ func acceptsEncoding(r *http.Request, encoding string) bool { } // Resolve AckToken from request, fallback on the agent record -func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent *model.Agent) (seqno sqn.SeqNo, err error) { +func (ct *CheckinT) resolveSeqNo(ctx context.Context, zlog zerolog.Logger, req CheckinRequest, agent *model.Agent) (seqno sqn.SeqNo, err error) { // Resolve AckToken from request, fallback on the agent record ackToken := req.AckToken seqno = agent.ActionSeqNo @@ -358,7 +366,7 @@ func (ct *CheckinT) resolveSeqNo(ctx context.Context, req CheckinRequest, agent sn, err = ct.tr.Resolve(ctx, ackToken) if err != nil { if errors.Is(err, dl.ErrNotFound) { - log.Debug().Str("token", ackToken).Str("agent_id", agent.Id).Msg("revision token not found") + zlog.Debug().Str("token", ackToken).Msg("revision token not found") err = nil } else { err = errors.Wrap(err, "resolveSeqNo") @@ -415,7 +423,7 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a Str("ctx", "processPolicy"). Int64("policyRevision", pp.Policy.RevisionIdx). Int64("policyCoordinator", pp.Policy.CoordinatorIdx). - Str("policyId", pp.Policy.PolicyId). + Str(LogPolicyId, pp.Policy.PolicyId). Logger() // The parsed policy object contains a map of name->role with a precalculated sha2. @@ -460,7 +468,7 @@ func processPolicy(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, a zlog.Info(). Str("hash.sha256", pp.Default.Role.Sha2). - Str("apiKeyId", defaultOutputApiKey.Id). + Str(LogDefaultOutputApiKeyId, defaultOutputApiKey.Id). Msg("Updating agent record to pick up default output key.") fields := map[string]interface{}{ @@ -582,7 +590,7 @@ func parseMeta(zlog zerolog.Logger, agent *model.Agent, req *CheckinRequest) ([] // Quick comparison first; compare the JSON payloads. // If the data is not consistently normalized, this short-circuit will not work. if bytes.Equal(req.LocalMeta, agent.LocalMetadata) { - log.Trace().Msg("quick comparing local metadata is equal") + zlog.Trace().Msg("quick comparing local metadata is equal") return nil, nil } diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 3b5538fa4..01b5169ec 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -27,6 +27,7 @@ import ( "github.com/julienschmidt/httprouter" "github.com/miolini/datacounter" "github.com/pkg/errors" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -76,53 +77,44 @@ func (rt Router) handleEnroll(w http.ResponseWriter, r *http.Request, ps httprou return } - enrollResponse, err := rt.et.handleEnroll(w, r) + reqId := r.Header.Get(logger.HeaderRequestID) - var data []byte - if err == nil { - data, err = json.Marshal(enrollResponse) - } + zlog := log.With(). + Str(EcsHttpRequestId, reqId). + Str("mod", kEnrollMod). 
+ Logger() - reqId := r.Header.Get(logger.HeaderRequestID) + resp, err := rt.et.handleEnroll(&zlog, w, r) if err != nil { cntEnroll.IncError(err) resp := NewErrorResp(err) - log.WithLevel(resp.Level). + zlog.WithLevel(resp.Level). Err(err). - Str(EcsHttpRequestId, reqId). - Str("mod", kEnrollMod). Int(EcsHttpResponseCode, resp.StatusCode). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). Msg("fail enroll") if err := resp.Write(w); err != nil { - log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail writing error response") + zlog.Error().Err(err).Msg("fail writing error response") } return } - var numWritten int - if numWritten, err = w.Write(data); err != nil { - log.Error().Err(err).Str(EcsHttpRequestId, reqId).Msg("fail send enroll response") - } - - cntEnroll.bodyOut.Add(uint64(numWritten)) + if err = writeResponse(zlog, w, resp, start); err != nil { + cntEnroll.IncError(err) + zlog.Error(). + Err(err). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("fail write response") - log.Info(). - Err(err). - Str("mod", kEnrollMod). - Str("agentId", enrollResponse.Item.ID). - Str("policyId", enrollResponse.Item.PolicyId). - Str("apiKeyId", enrollResponse.Item.AccessApiKeyId). - Str(EcsHttpRequestId, reqId). - Int(EcsHttpResponseBodyBytes, numWritten). - Int64(EcsEventDuration, time.Since(start).Nanoseconds()). - Msg("success enroll") + // Remove ghost artifacts; agent will never receive the paylod + rt.et.wipeGhosts(r.Context(), zlog, resp) + } } -func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*EnrollResponse, error) { +func (et *EnrollerT) handleEnroll(zlog *zerolog.Logger, w http.ResponseWriter, r *http.Request) (*EnrollResponse, error) { limitF, err := et.limit.Acquire() if err != nil { @@ -135,7 +127,12 @@ func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*Enro return nil, err } - ver, err := validateUserAgent(r, et.verCon) + // Pointer is passed in to allow UpdateContext by child function + zlog.UpdateContext(func(ctx zerolog.Context) zerolog.Context { + return ctx.Str(LogEnrollApiKeyId, key.Id) + }) + + ver, err := validateUserAgent(*zlog, r, et.verCon) if err != nil { return nil, err } @@ -144,8 +141,13 @@ func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*Enro dfunc := cntEnroll.IncStart() defer dfunc() + return et.processRequest(*zlog, w, r, key.Id, ver) +} + +func (et *EnrollerT) processRequest(zlog zerolog.Logger, w http.ResponseWriter, r *http.Request, enrollmentApiKeyId, ver string) (*EnrollResponse, error) { + // Validate that an enrollment record exists for a key with this id. 
- erec, err := et.fetchEnrollmentKeyRecord(r.Context(), key.Id) + erec, err := et.fetchEnrollmentKeyRecord(r.Context(), enrollmentApiKeyId) if err != nil { return nil, err } @@ -167,10 +169,10 @@ func (et *EnrollerT) handleEnroll(w http.ResponseWriter, r *http.Request) (*Enro cntEnroll.bodyIn.Add(readCounter.Count()) - return _enroll(r.Context(), et.bulker, et.cache, *req, *erec, ver) + return et._enroll(r.Context(), zlog, req, erec.PolicyId, ver) } -func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollRequest, erec model.EnrollmentApiKey, ver string) (*EnrollResponse, error) { +func (et *EnrollerT) _enroll(ctx context.Context, zlog zerolog.Logger, req *EnrollRequest, policyId, ver string) (*EnrollResponse, error) { if req.SharedId != "" { // TODO: Support pre-existing install @@ -185,26 +187,23 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq return nil, err } - // TODO: Cleanup after ourselves on failure: - // Revoke generated keys. - // Remove agent record. - agentId := u.String() - accessApiKey, err := generateAccessApiKey(ctx, bulker, agentId) + // Update the local metadata agent id + localMeta, err := updateLocalMetaAgentId(req.Meta.Local, agentId) if err != nil { return nil, err } - // Update the local metadata agent id - localMeta, err := updateLocalMetaAgentId(req.Meta.Local, agentId) + // Generate the Fleet Agent access api key + accessApiKey, err := generateAccessApiKey(ctx, et.bulker, agentId) if err != nil { return nil, err } agentData := model.Agent{ Active: true, - PolicyId: erec.PolicyId, + PolicyId: policyId, Type: req.Type, EnrolledAt: now.UTC().Format(time.RFC3339), LocalMetadata: localMeta, @@ -216,8 +215,9 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq }, } - err = createFleetAgent(ctx, bulker, agentId, agentData) + err = createFleetAgent(ctx, et.bulker, agentId, agentData) if err != nil { + invalidateApiKey(ctx, zlog, et.bulker, accessApiKey.Id) return nil, err } @@ -238,11 +238,96 @@ func _enroll(ctx context.Context, bulker bulk.Bulk, c cache.Cache, req EnrollReq } // We are Kool & and the Gang; cache the access key to avoid the roundtrip on impending checkin - c.SetApiKey(*accessApiKey, true) + et.cache.SetApiKey(*accessApiKey, true) return &resp, nil } +// Remove the ghost artifacts from Elastic; the agent record and the accessApiKey. +func (et *EnrollerT) wipeGhosts(ctx context.Context, zlog zerolog.Logger, resp *EnrollResponse) { + zlog = zlog.With().Str(LogAgentId, resp.Item.ID).Logger() + + if err := et.bulker.Delete(ctx, dl.FleetAgents, resp.Item.ID); err != nil { + zlog.Error().Err(err).Msg("ghost agent record failed to delete") + } else { + zlog.Info().Msg("ghost agent record deleted") + } + + invalidateApiKey(ctx, zlog, et.bulker, resp.Item.AccessApiKeyId) +} + +func invalidateApiKey(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, apikeyId string) error { + + // hack-a-rama: We purposely do not force a "refresh:true" on the Apikey creation + // because doing so causes the api call to slow down at scale. It is already very slow. + // So we have to wait for the key to become visible until we can invalidate it. 
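+	//
+	// Concretely, the loop below polls ApiKeyRead roughly once per second until
+	// the key becomes readable, aborting with an error after about a minute;
+	// only once the key is visible is ApiKeyInvalidate called.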
+ + zlog = zlog.With().Str(LogApiKeyId, apikeyId).Logger() + + start := time.Now() + +LOOP: + for { + + _, err := bulker.ApiKeyRead(ctx, apikeyId) + + switch { + case err == nil: + break LOOP + case !errors.Is(err, apikey.ErrApiKeyNotFound): + zlog.Error().Err(err).Msg("Fail ApiKeyRead") + return err + case time.Since(start) > time.Minute: + err := errors.New("Apikey index failed to refresh") + zlog.Error().Err(err).Msg("Abort query attempt on apikey") + return err + } + + select { + case <-ctx.Done(): + zlog.Error(). + Err(ctx.Err()). + Str("apikeyId", apikeyId). + Msg("Failed to invalidate apiKey on ctx done during hack sleep") + return ctx.Err() + case <-time.After(time.Second): + } + } + + if err := bulker.ApiKeyInvalidate(ctx, apikeyId); err != nil { + zlog.Error().Err(err).Msg("fail invalidate apiKey") + return err + } + + zlog.Info().Dur("dur", time.Since(start)).Msg("invalidated apiKey") + return nil +} + +func writeResponse(zlog zerolog.Logger, w http.ResponseWriter, resp *EnrollResponse, start time.Time) error { + + data, err := json.Marshal(resp) + if err != nil { + return errors.Wrap(err, "marshal enrollResponse") + } + + numWritten, err := w.Write(data) + cntEnroll.bodyOut.Add(uint64(numWritten)) + + if err != nil { + return errors.Wrap(err, "fail send enroll response") + } + + zlog.Info(). + Str(LogAgentId, resp.Item.ID). + Str(LogPolicyId, resp.Item.PolicyId). + Str(LogAccessApiKeyId, resp.Item.AccessApiKeyId). + Int(EcsHttpResponseBodyBytes, numWritten). + Int64(EcsEventDuration, time.Since(start).Nanoseconds()). + Msg("success enroll") + + return nil +} + // updateMetaLocalAgentId updates the agent id in the local metadata if exists // At the time of writing the local metadata blob looks something like this // { diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 025892acd..98ad61630 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -100,7 +100,7 @@ func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Serve ln = wrapConnLimitter(ctx, ln, cfg) if cfg.TLS != nil && cfg.TLS.IsEnabled() { - commonTlsCfg, err := tlscommon.LoadTLSConfig(cfg.TLS) + commonTlsCfg, err := tlscommon.LoadTLSServerConfig(cfg.TLS) if err != nil { return err } diff --git a/cmd/fleet/userAgent.go b/cmd/fleet/userAgent.go index 76feb425f..2c960b886 100644 --- a/cmd/fleet/userAgent.go +++ b/cmd/fleet/userAgent.go @@ -13,6 +13,7 @@ import ( "strings" "github.com/hashicorp/go-version" + "github.com/rs/zerolog" ) const ( @@ -57,13 +58,24 @@ func maximizePatch(ver *version.Version) string { // validateUserAgent validates that the User-Agent of the connecting Elastic Agent is valid and that the version is // supported for this Fleet Server. -func validateUserAgent(r *http.Request, verConst version.Constraints) (string, error) { +func validateUserAgent(zlog zerolog.Logger, r *http.Request, verConst version.Constraints) (string, error) { userAgent := r.Header.Get("User-Agent") + + zlog = zlog.With().Str("userAgent", userAgent).Logger() + if userAgent == "" { + zlog.Info(). + Err(ErrInvalidUserAgent). + Msg("empty User-Agent") return "", ErrInvalidUserAgent } + userAgent = strings.ToLower(userAgent) if !strings.HasPrefix(userAgent, userAgentPrefix) { + zlog.Info(). + Err(ErrInvalidUserAgent). + Str("targetPrefix", userAgentPrefix). + Msg("invalid user agent prefix") return "", ErrInvalidUserAgent } @@ -78,10 +90,20 @@ func validateUserAgent(r *http.Request, verConst version.Constraints) (string, e ver, err := version.NewVersion(verStr) if err != nil { + zlog.Info(). 
+ Err(err). + Str("verStr", verStr). + Msg("invalid user agent version string") return "", ErrInvalidUserAgent } if !verConst.Check(ver) { + zlog.Info(). + Err(ErrUnsupportedVersion). + Str("verStr", verStr). + Str("constraints", verConst.String()). + Msg("unsuported user agent version") return "", ErrUnsupportedVersion } + return ver.String(), nil } diff --git a/cmd/fleet/userAgent_test.go b/cmd/fleet/userAgent_test.go index 671d412a5..8beb40ed0 100644 --- a/cmd/fleet/userAgent_test.go +++ b/cmd/fleet/userAgent_test.go @@ -9,6 +9,7 @@ import ( "testing" "github.com/hashicorp/go-version" + "github.com/rs/zerolog/log" ) func TestValidateUserAgent(t *testing.T) { @@ -112,7 +113,7 @@ func TestValidateUserAgent(t *testing.T) { t.Run(tr.userAgent, func(t *testing.T) { req := httptest.NewRequest("GET", "/", nil) req.Header.Set("User-Agent", tr.userAgent) - _, res := validateUserAgent(req, tr.verCon) + _, res := validateUserAgent(log.Logger, req, tr.verCon) if tr.err != res { t.Fatalf("err mismatch: %v != %v", tr.err, res) } diff --git a/internal/pkg/action/dispatcher.go b/internal/pkg/action/dispatcher.go index 1ac06dee9..756f31db1 100644 --- a/internal/pkg/action/dispatcher.go +++ b/internal/pkg/action/dispatcher.go @@ -9,6 +9,7 @@ import ( "sync" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/sqn" @@ -65,7 +66,7 @@ func (d *Dispatcher) Subscribe(agentId string, seqNo sqn.SeqNo) *Sub { sz := len(d.subs) d.mx.Unlock() - log.Trace().Str("agentId", agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") + log.Trace().Str(logger.AgentId, agentId).Int("sz", sz).Msg("Subscribed to action dispatcher") return &sub } @@ -80,7 +81,7 @@ func (d *Dispatcher) Unsubscribe(sub *Sub) { sz := len(d.subs) d.mx.Unlock() - log.Trace().Str("agentId", sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") + log.Trace().Str(logger.AgentId, sub.agentId).Int("sz", sz).Msg("Unsubscribed from action dispatcher") } func (d *Dispatcher) process(ctx context.Context, hits []es.HitT) { @@ -119,7 +120,7 @@ func (d *Dispatcher) getSub(agentId string) (Sub, bool) { func (d *Dispatcher) dispatch(ctx context.Context, agentId string, acdocs []model.Action) { sub, ok := d.getSub(agentId) if !ok { - log.Debug().Str("agent_id", agentId).Msg("Agent is not currently connected. Not dispatching actions.") + log.Debug().Str(logger.AgentId, agentId).Msg("Agent is not currently connected. 
Not dispatching actions.") return } select { diff --git a/internal/pkg/apikey/get.go b/internal/pkg/apikey/get.go index a6ed039a5..50ba3a64d 100644 --- a/internal/pkg/apikey/get.go +++ b/internal/pkg/apikey/get.go @@ -7,10 +7,10 @@ package apikey import ( "context" "encoding/json" - "fmt" "github.com/elastic/go-elasticsearch/v7" "github.com/elastic/go-elasticsearch/v7/esapi" + "github.com/pkg/errors" ) type ApiKeyMetadata struct { @@ -36,7 +36,7 @@ func Read(ctx context.Context, client *elasticsearch.Client, id string) (apiKey defer res.Body.Close() if res.IsError() { - err = fmt.Errorf("fail GetAPIKey: %s, %w", res.String(), ErrApiKeyNotFound) + err = errors.Wrap(ErrApiKeyNotFound, res.String()) return } diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index 3ec438177..b4238fb3e 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -55,16 +55,16 @@ func (c *ServerBulk) InitDefaults() { // Server is the configuration for the server type Server struct { - Host string `config:"host"` - Port uint16 `config:"port"` - TLS *tlscommon.Config `config:"ssl"` - Timeouts ServerTimeouts `config:"timeouts"` - Profiler ServerProfiler `config:"profiler"` - CompressionLevel int `config:"compression_level"` - CompressionThresh int `config:"compression_threshold"` - Limits ServerLimits `config:"limits"` - Runtime Runtime `config:"runtime"` - Bulk ServerBulk `config:"bulk"` + Host string `config:"host"` + Port uint16 `config:"port"` + TLS *tlscommon.ServerConfig `config:"ssl"` + Timeouts ServerTimeouts `config:"timeouts"` + Profiler ServerProfiler `config:"profiler"` + CompressionLevel int `config:"compression_level"` + CompressionThresh int `config:"compression_threshold"` + Limits ServerLimits `config:"limits"` + Runtime Runtime `config:"runtime"` + Bulk ServerBulk `config:"bulk"` } // InitDefaults initializes the defaults for the configuration. diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index e1ee47615..e0a4ce0f2 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -7,7 +7,6 @@ package coordinator import ( "context" "errors" - "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "net" "os" "runtime" @@ -21,6 +20,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/config" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" "github.com/elastic/fleet-server/v7/internal/pkg/sleep" @@ -419,7 +419,7 @@ func runCoordinatorOutput(ctx context.Context, cord Coordinator, bulker bulk.Bul s := l.With().Int64(dl.FieldRevisionIdx, p.RevisionIdx).Int64(dl.FieldCoordinatorIdx, p.CoordinatorIdx).Logger() _, err := dl.CreatePolicy(ctx, bulker, p, dl.WithIndexName(policiesIndex)) if err != nil { - l.Err(err).Msg("failed to insert a new policy revision") + s.Err(err).Msg("failed to insert a new policy revision") } else { s.Info().Msg("coordinator inserted a new policy revision") } @@ -430,8 +430,15 @@ func runCoordinatorOutput(ctx context.Context, cord Coordinator, bulker bulk.Bul } func runUnenroller(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, l zerolog.Logger, checkInterval time.Duration, agentsIndex string) { + l.Info(). + Dur("checkInterval", checkInterval). 
+ Dur("unenrollTimeout", unenrollTimeout). + Msg("unenroll monitor start") + defer l.Info().Msg("unenroll monitor exit") + t := time.NewTimer(checkInterval) defer t.Stop() + for { select { case <-t.C: @@ -445,26 +452,32 @@ func runUnenroller(ctx context.Context, bulker bulk.Bulk, policyId string, unenr } } -func runUnenrollerWork(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, l zerolog.Logger, agentsIndex string) error { +func runUnenrollerWork(ctx context.Context, bulker bulk.Bulk, policyId string, unenrollTimeout time.Duration, zlog zerolog.Logger, agentsIndex string) error { agents, err := dl.FindOfflineAgents(ctx, bulker, policyId, unenrollTimeout, dl.WithIndexName(agentsIndex)) - if err != nil { + if err != nil || len(agents) == 0 { return err } + + zlog = zlog.With().Dur("timeout", unenrollTimeout).Logger() + agentIds := make([]string, len(agents)) + for i, agent := range agents { - err = unenrollAgent(ctx, bulker, &agent, agentsIndex) + err = unenrollAgent(ctx, zlog, bulker, &agent, agentsIndex) if err != nil { return err } agentIds[i] = agent.Id } - if len(agentIds) > 0 { - l.Info().Strs("agents", agentIds).Msg("marked agents unenrolled due to unenroll timeout") - } + + zlog.Info(). + Strs(logger.ApiKeyId, agentIds). + Msg("marked agents unenrolled due to unenroll timeout") + return nil } -func unenrollAgent(ctx context.Context, bulker bulk.Bulk, agent *model.Agent, agentsIndex string) error { +func unenrollAgent(ctx context.Context, zlog zerolog.Logger, bulker bulk.Bulk, agent *model.Agent, agentsIndex string) error { now := time.Now().UTC().Format(time.RFC3339) fields := bulk.UpdateFields{ dl.FieldActive: false, @@ -477,13 +490,25 @@ func unenrollAgent(ctx context.Context, bulker bulk.Bulk, agent *model.Agent, ag return err } apiKeys := getAPIKeyIDs(agent) + + zlog = zlog.With(). + Str(logger.AgentId, agent.Id). + Strs(logger.ApiKeyId, apiKeys). + Logger() + + zlog.Info().Msg("unenrollAgent due to unenroll timeout") + if len(apiKeys) > 0 { - err = apikey.Invalidate(ctx, bulker.Client(), apiKeys...) + err = bulker.ApiKeyInvalidate(ctx, apiKeys...) if err != nil { + zlog.Error().Err(err).Msg("Fail apiKey invalidate") return err } } - err = bulker.Update(ctx, agentsIndex, agent.Id, body, bulk.WithRefresh()) + if err = bulker.Update(ctx, agentsIndex, agent.Id, body, bulk.WithRefresh()); err != nil { + zlog.Error().Err(err).Msg("Fail unenrollAgent record update") + } + return err } diff --git a/internal/pkg/coordinator/v0.go b/internal/pkg/coordinator/v0.go index 5c8da5837..1730b34c8 100644 --- a/internal/pkg/coordinator/v0.go +++ b/internal/pkg/coordinator/v0.go @@ -11,6 +11,7 @@ import ( "github.com/rs/zerolog" "github.com/rs/zerolog/log" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" ) @@ -26,7 +27,7 @@ type coordinatorZeroT struct { // NewCoordinatorZero creates a V0 coordinator. 
func NewCoordinatorZero(policy model.Policy) (Coordinator, error) { return &coordinatorZeroT{ - log: log.With().Str("ctx", "coordinator v0").Str("policyId", policy.PolicyId).Logger(), + log: log.With().Str("ctx", "coordinator v0").Str(logger.PolicyId, policy.PolicyId).Logger(), policy: policy, in: make(chan model.Policy), out: make(chan model.Policy), diff --git a/internal/pkg/es/client.go b/internal/pkg/es/client.go index c5ca8f129..70db0f1c3 100644 --- a/internal/pkg/es/client.go +++ b/internal/pkg/es/client.go @@ -34,28 +34,32 @@ func NewClient(ctx context.Context, cfg *config.Config, longPoll bool, opts ...C opt(escfg) } - log.Debug(). - Strs("addr", addr). - Str("user", user). - Int("maxConnsPersHost", mcph). - Msg("init es") + zlog := log.With(). + Strs("cluster.addr", addr). + Str("cluster.user", user). + Int("cluster.maxConnsPersHost", mcph). + Logger() + + zlog.Debug().Msg("init es") es, err := elasticsearch.NewClient(escfg) if err != nil { + zlog.Error().Err(err).Msg("fail elasticsearch init") return nil, err } // Validate connection resp, err := info(ctx, es) if err != nil { + zlog.Error().Err(err).Msg("fail elasticsearch info") return nil, err } - log.Info(). - Str("name", resp.ClusterName). - Str("uuid", resp.ClusterUUID). - Str("vers", resp.Version.Number). - Msg("Cluster Info") + zlog.Info(). + Str("cluster.name", resp.ClusterName). + Str("cluster.uuid", resp.ClusterUUID). + Str("cluster.version", resp.Version.Number). + Msg("elasticsearch cluster info") return es, nil } diff --git a/internal/pkg/logger/ecs.go b/internal/pkg/logger/ecs.go index f52172e24..b25097b13 100644 --- a/internal/pkg/logger/ecs.go +++ b/internal/pkg/logger/ecs.go @@ -37,7 +37,17 @@ const ( EcsServerAddress = "server.address" // TLS - EcsTlsEstablished = "tls.established" + EcsTlsEstablished = "tls.established" + EcsTlsResumed = "tls.resumed" + EcsTlsVersion = "tls.version" + EcsTlsClientServerName = "tls.client.server_name" + EcsTlsCipher = "tls.cipher" + EcsTlsClientIssuer = "tls.client.issuer" + EcsTlsClientSubject = "tls.client.subject" + EcsTlsClientNotBefore = "tls.client.not_before" + EcsTlsClientNotAfter = "tls.client.not_after" + EcsTlsClientSerialNumber = "tls.client.x509.serial_number" + EcsTlsClientTimeFormat = "2006-01-02T15:04:05.999Z" // Event EcsEventDuration = "event.duration" @@ -45,3 +55,14 @@ const ( // Service EcsServiceName = "service.name" ) + +// Non ECS compliant contants used in logging + +const ( + ApiKeyId = "fleet.apikey.id" + PolicyId = "fleet.policy.id" + AgentId = "fleet.agent.id" + EnrollApiKeyId = "fleet.enroll.apikey.id" + AccessApiKeyId = "fleet.access.apikey.id" + DefaultOutputApiKeyId = "fleet.default.apikey.id" +) diff --git a/internal/pkg/logger/http.go b/internal/pkg/logger/http.go index 64eeae012..07fbbfb12 100644 --- a/internal/pkg/logger/http.go +++ b/internal/pkg/logger/http.go @@ -5,6 +5,8 @@ package logger import ( + "crypto/tls" + "fmt" "io" "net" "net/http" @@ -13,7 +15,9 @@ import ( "sync/atomic" "time" + "github.com/elastic/fleet-server/v7/internal/pkg/apikey" "github.com/julienschmidt/httprouter" + "github.com/rs/zerolog" "github.com/rs/zerolog/log" ) @@ -45,9 +49,8 @@ func (rd *ReaderCounter) Count() uint64 { type ResponseCounter struct { http.ResponseWriter - count uint64 - statusCode int - wroteHeader bool + count uint64 + statusCode int } func NewResponseCounter(w http.ResponseWriter) *ResponseCounter { @@ -57,10 +60,10 @@ func NewResponseCounter(w http.ResponseWriter) *ResponseCounter { } func (rc *ResponseCounter) Write(buf []byte) 
(int, error) { - if !rc.wroteHeader { - rc.wroteHeader = true - rc.statusCode = 200 + if rc.statusCode == 0 { + rc.WriteHeader(http.StatusOK) } + n, err := rc.ResponseWriter.Write(buf) atomic.AddUint64(&rc.count, uint64(n)) return n, err @@ -70,9 +73,8 @@ func (rc *ResponseCounter) WriteHeader(statusCode int) { rc.ResponseWriter.WriteHeader(statusCode) // Defend unsupported multiple calls to WriteHeader - if !rc.wroteHeader { + if rc.statusCode == 0 { rc.statusCode = statusCode - rc.wroteHeader = true } } @@ -95,18 +97,97 @@ func splitAddr(addr string) (host string, port int) { // Expects HTTP version in form of HTTP/x.y func stripHTTP(h string) string { - if strings.HasPrefix(h, httpSlashPrefix) { - return h[len(httpSlashPrefix):] + + switch h { + case "HTTP/2.0": + return "2.0" + case "HTTP/1.1": + return "1.1" + default: + if strings.HasPrefix(h, httpSlashPrefix) { + return h[len(httpSlashPrefix):] + } } return h } +func httpMeta(r *http.Request, e *zerolog.Event) { + // Look for request id + if reqID := r.Header.Get(HeaderRequestID); reqID != "" { + e.Str(EcsHttpRequestId, reqID) + } + + oldForce := r.URL.ForceQuery + r.URL.ForceQuery = false + e.Str(EcsUrlFull, r.URL.String()) + r.URL.ForceQuery = oldForce + + if domain := r.URL.Hostname(); domain != "" { + e.Str(EcsUrlDomain, domain) + } + + port := r.URL.Port() + if port != "" { + if v, err := strconv.Atoi(port); err == nil { + e.Int(EcsUrlPort, v) + } + } + + // HTTP info + e.Str(EcsHttpVersion, stripHTTP(r.Proto)) + e.Str(EcsHttpRequestMethod, r.Method) + + // ApiKey + if apiKey, err := apikey.ExtractAPIKey(r); err == nil { + e.Str(ApiKeyId, apiKey.Id) + } + + // Client info + if r.RemoteAddr != "" { + e.Str(EcsClientAddress, r.RemoteAddr) + } + + // TLS info + e.Bool(EcsTlsEstablished, r.TLS != nil) +} + +func httpDebug(r *http.Request, e *zerolog.Event) { + // Client info + if r.RemoteAddr != "" { + remoteIP, remotePort := splitAddr(r.RemoteAddr) + e.Str(EcsClientIp, remoteIP) + e.Int(EcsClientPort, remotePort) + } + + if r.TLS != nil { + + e.Str(EcsTlsVersion, TlsVersionToString(r.TLS.Version)) + e.Str(EcsTlsCipher, tls.CipherSuiteName(r.TLS.CipherSuite)) + e.Bool(EcsTlsResumed, r.TLS.DidResume) + + if r.TLS.ServerName != "" { + e.Str(EcsTlsClientServerName, r.TLS.ServerName) + } + + if len(r.TLS.PeerCertificates) > 0 && r.TLS.PeerCertificates[0] != nil { + leaf := r.TLS.PeerCertificates[0] + if leaf.SerialNumber != nil { + e.Str(EcsTlsClientSerialNumber, leaf.SerialNumber.String()) + } + e.Str(EcsTlsClientIssuer, leaf.Issuer.String()) + e.Str(EcsTlsClientSubject, leaf.Subject.String()) + e.Str(EcsTlsClientNotBefore, leaf.NotBefore.UTC().Format(EcsTlsClientTimeFormat)) + e.Str(EcsTlsClientNotAfter, leaf.NotAfter.UTC().Format(EcsTlsClientTimeFormat)) + } + } +} + // ECS HTTP log wrapper func HttpHandler(next httprouter.Handle) httprouter.Handle { return func(w http.ResponseWriter, r *http.Request, p httprouter.Params) { - e := log.Debug() + e := log.Info() if !e.Enabled() { next(w, r, p) @@ -120,46 +201,39 @@ func HttpHandler(next httprouter.Handle) httprouter.Handle { wrCounter := NewResponseCounter(w) - next(wrCounter, r, p) - - // Look for request id - if reqID := r.Header.Get(HeaderRequestID); reqID != "" { - e.Str(EcsHttpRequestId, reqID) + if log.Debug().Enabled() { + d := log.Debug() + httpMeta(r, d) + httpDebug(r, d) + d.Msg("HTTP start") } - // URL info - e.Str(EcsUrlFull, r.URL.String()) - - if domain := r.URL.Hostname(); domain != "" { - e.Str(EcsUrlDomain, domain) - } + next(wrCounter, r, p) - port := r.URL.Port() - 
if port != "" { - if v, err := strconv.Atoi(port); err != nil { - e.Int(EcsUrlPort, v) - } - } + httpMeta(r, e) - // HTTP info - e.Str(EcsHttpVersion, stripHTTP(r.Proto)) - e.Str(EcsHttpRequestMethod, r.Method) - e.Int(EcsHttpResponseCode, wrCounter.statusCode) + // Data on response e.Uint64(EcsHttpRequestBodyBytes, rdCounter.Count()) e.Uint64(EcsHttpResponseBodyBytes, wrCounter.Count()) - - // Client info - remoteIP, remotePort := splitAddr(r.RemoteAddr) - e.Str(EcsClientAddress, r.RemoteAddr) - e.Str(EcsClientIp, remoteIP) - e.Int(EcsClientPort, remotePort) - - // TLS info - e.Bool(EcsTlsEstablished, (r.TLS != nil)) - - // Event info + e.Int(EcsHttpResponseCode, wrCounter.statusCode) e.Int64(EcsEventDuration, time.Since(start).Nanoseconds()) - e.Msg("HTTP handler") + e.Msg("HTTP done") + } +} + +func TlsVersionToString(vers uint16) string { + switch vers { + case tls.VersionTLS10: + return "1.0" + case tls.VersionTLS11: + return "1.1" + case tls.VersionTLS12: + return "1.2" + case tls.VersionTLS13: + return "1.3" + default: } + + return fmt.Sprintf("unknown_0x%x", vers) } diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index 0dfd9076c..ea532331a 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -16,6 +16,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/bulk" "github.com/elastic/fleet-server/v7/internal/pkg/dl" "github.com/elastic/fleet-server/v7/internal/pkg/es" + "github.com/elastic/fleet-server/v7/internal/pkg/logger" "github.com/elastic/fleet-server/v7/internal/pkg/model" "github.com/elastic/fleet-server/v7/internal/pkg/monitor" ) @@ -217,7 +218,7 @@ func (m *monitorT) dispatchPending() bool { policy, ok := m.policies[s.policyId] if !ok { m.log.Warn(). - Str(dl.FieldPolicyId, s.policyId). + Str(logger.PolicyId, s.policyId). Msg("logic error: policy missing on dispatch") return done } @@ -225,8 +226,8 @@ func (m *monitorT) dispatchPending() bool { select { case s.ch <- &policy.pp: m.log.Debug(). - Str("agent_id", s.agentId). - Str(dl.FieldPolicyId, s.policyId). + Str(logger.AgentId, s.agentId). + Str(logger.PolicyId, s.policyId). Int64("rev", s.revIdx). Int64("coord", s.coordIdx). Msg("dispatch") @@ -234,8 +235,8 @@ func (m *monitorT) dispatchPending() bool { // Should never block on a channel; we created a channel of size one. // A block here indicates a logic error somewheres. m.log.Error(). - Str(dl.FieldPolicyId, s.policyId). - Str("agent_id", s.agentId). + Str(logger.PolicyId, s.policyId). + Str(logger.AgentId, s.agentId). Msg("logic error: should never block on policy channel") } @@ -299,7 +300,7 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { newPolicy := pp.Policy zlog := m.log.With(). - Str(dl.FieldPolicyId, newPolicy.PolicyId). + Str(logger.PolicyId, newPolicy.PolicyId). Int64("rev", newPolicy.RevisionIdx). Int64("coord", newPolicy.CoordinatorIdx). Logger() @@ -351,7 +352,7 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { } zlog.Debug(). - Str("agent_id", sub.agentId). + Str(logger.AgentId, sub.agentId). Msg("scheduled pendingQ on policy revision") nQueued += 1 @@ -394,8 +395,8 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, } m.log.Debug(). - Str("agent_id", agentId). - Str(dl.FieldPolicyId, policyId). + Str(logger.AgentId, agentId). + Str(logger.PolicyId, policyId). Int64("rev", revisionIdx). Int64("coord", coordinatorIdx). 
Msg("subscribed to policy monitor") @@ -415,7 +416,7 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, case !ok: // We've not seen this policy before, force load. m.log.Info(). - Str(dl.FieldPolicyId, policyId). + Str(logger.PolicyId, policyId). Msg("force load on unknown policyId") p = policyT{head: makeHead()} p.head.pushBack(s) @@ -425,7 +426,7 @@ func (m *monitorT) Subscribe(agentId string, policyId string, revisionIdx int64, empty := m.pendingQ.isEmpty() m.pendingQ.pushBack(s) m.log.Debug(). - Str("agent_id", s.agentId). + Str(logger.AgentId, s.agentId). Msg("scheduled pending on subscribe") if empty { m.kickDeploy() @@ -449,8 +450,8 @@ func (m *monitorT) Unsubscribe(sub Subscription) error { m.mut.Unlock() m.log.Debug(). - Str("agent_id", s.agentId). - Str(dl.FieldPolicyId, s.policyId). + Str(logger.AgentId, s.agentId). + Str(logger.PolicyId, s.policyId). Int64("rev", s.revIdx). Int64("coord", s.coordIdx). Msg("unsubscribe") From 093e9f0aaff86536c852ff89832623e8e8c829fd Mon Sep 17 00:00:00 2001 From: Nicolas Ruflin Date: Thu, 11 Nov 2021 09:56:46 +0100 Subject: [PATCH 222/240] Improve some of the log message (#844) (#862) This was coming out of the debugging session around fleet-server where some of the log messages were not too clear to me on what these mean. --- cmd/fleet/handleAck.go | 2 +- cmd/fleet/handleEnroll.go | 4 ++-- cmd/fleet/main.go | 4 ++-- cmd/fleet/router.go | 4 +++- internal/pkg/bulk/helpers.go | 10 +++++++++- internal/pkg/bulk/opRead.go | 4 ++-- internal/pkg/coordinator/monitor.go | 10 +++++----- internal/pkg/monitor/monitor.go | 4 ++-- internal/pkg/policy/monitor.go | 7 ++++--- internal/pkg/ver/check.go | 2 +- 10 files changed, 31 insertions(+), 20 deletions(-) diff --git a/cmd/fleet/handleAck.go b/cmd/fleet/handleAck.go index ae0dccd5e..e2a5b2a2b 100644 --- a/cmd/fleet/handleAck.go +++ b/cmd/fleet/handleAck.go @@ -41,7 +41,7 @@ type AckT struct { func NewAckT(cfg *config.Server, bulker bulk.Bulk, cache cache.Cache) *AckT { log.Info(). Interface("limits", cfg.Limits.AckLimit). - Msg("Ack install limits") + Msg("Setting config ack_limits") return &AckT{ cfg: cfg, diff --git a/cmd/fleet/handleEnroll.go b/cmd/fleet/handleEnroll.go index 01b5169ec..de1f922cc 100644 --- a/cmd/fleet/handleEnroll.go +++ b/cmd/fleet/handleEnroll.go @@ -56,7 +56,7 @@ func NewEnrollerT(verCon version.Constraints, cfg *config.Server, bulker bulk.Bu log.Info(). Interface("limits", cfg.Limits.EnrollLimit). - Msg("Enroller install limits") + Msg("Setting config enroll_limit") return &EnrollerT{ verCon: verCon, @@ -323,7 +323,7 @@ func writeResponse(zlog zerolog.Logger, w http.ResponseWriter, resp *EnrollRespo Str(LogAccessApiKeyId, resp.Item.AccessApiKeyId). Int(EcsHttpResponseBodyBytes, numWritten). Int64(EcsEventDuration, time.Since(start).Nanoseconds()). - Msg("success enroll") + Msg("Elastic Agent successfully enrolled") return nil } diff --git a/cmd/fleet/main.go b/cmd/fleet/main.go index 18f303ffb..3a695e191 100644 --- a/cmd/fleet/main.go +++ b/cmd/fleet/main.go @@ -61,7 +61,7 @@ func installSignalHandler() context.Context { func makeCache(cfg *config.Config) (cache.Cache, error) { cacheCfg := makeCacheConfig(cfg) - log.Info().Interface("cfg", cacheCfg).Msg("makeCache") + log.Info().Interface("cfg", cacheCfg).Msg("Setting cache config options") return cache.New(cacheCfg) } @@ -92,7 +92,7 @@ func initLogger(cfg *config.Config, version, commit string) (*logger.Logger, err Int("ppid", os.Getppid()). Str("exe", os.Args[0]). Strs("args", os.Args[1:]). 
- Msg("boot") + Msg("Boot fleet-server") log.Debug().Strs("env", os.Environ()).Msg("environment") return l, err diff --git a/cmd/fleet/router.go b/cmd/fleet/router.go index 4f40f703a..8b6f6fc58 100644 --- a/cmd/fleet/router.go +++ b/cmd/fleet/router.go @@ -82,7 +82,7 @@ func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, at *ArtifactT, ack log.Info(). Str("method", rte.method). Str("path", rte.path). - Msg("Server install route") + Msg("fleet-server route added") router.Handle( rte.method, @@ -91,5 +91,7 @@ func NewRouter(bulker bulk.Bulk, ct *CheckinT, et *EnrollerT, at *ArtifactT, ack ) } + log.Info().Msg("fleet-server routes set up") + return router } diff --git a/internal/pkg/bulk/helpers.go b/internal/pkg/bulk/helpers.go index f2659591a..d4ad7ef35 100644 --- a/internal/pkg/bulk/helpers.go +++ b/internal/pkg/bulk/helpers.go @@ -6,6 +6,7 @@ package bulk import ( "encoding/json" + "io/ioutil" "github.com/elastic/fleet-server/v7/internal/pkg/es" "github.com/elastic/go-elasticsearch/v7/esapi" @@ -35,7 +36,14 @@ func parseError(res *esapi.Response) error { decoder := json.NewDecoder(res.Body) if err := decoder.Decode(&e); err != nil { - log.Error().Err(err).Msg("Cannot decode error body") + log.Error().Err(err).Msg("Cannot decode Elasticsearch error body") + bodyBytes, readErr := ioutil.ReadAll(res.Body) + if readErr != nil { + log.Debug().Err(readErr).Msg("Error reading error response body from Elasticsearch") + } else { + log.Debug().Err(err).Bytes("body", bodyBytes).Msg("Error content") + } + return err } diff --git a/internal/pkg/bulk/opRead.go b/internal/pkg/bulk/opRead.go index 8dd599494..6c6de1255 100644 --- a/internal/pkg/bulk/opRead.go +++ b/internal/pkg/bulk/opRead.go @@ -86,7 +86,7 @@ func (b *Bulker) flushRead(ctx context.Context, queue queueT) error { res, err := req.Do(ctx, b.es) if err != nil { - log.Error().Err(err).Str("mod", kModBulk).Msg("Fail MgetRequest req.Do") + log.Error().Err(err).Str("mod", kModBulk).Msg("Error sending mget request to Elasticsearch") return err } @@ -95,7 +95,7 @@ func (b *Bulker) flushRead(ctx context.Context, queue queueT) error { } if res.IsError() { - log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Fail MgetRequest result") + log.Error().Str("mod", kModBulk).Str("err", res.String()).Msg("Error in mget request result to Elasticsearch") return parseError(res) } diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index e0a4ce0f2..66c934154 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -398,10 +398,10 @@ func (m *monitorT) rescheduleUnenroller(ctx context.Context, pt *policyT, p *mod func runCoordinator(ctx context.Context, cord Coordinator, l zerolog.Logger, d time.Duration) { cnt := 0 for { - l.Info().Int("count", cnt).Str("coordinator", cord.Name()).Msg("starting coordinator for policy") + l.Info().Int("count", cnt).Str("coordinator", cord.Name()).Msg("Starting policy coordinator") err := cord.Run(ctx) if err != context.Canceled { - l.Err(err).Msg("coordinator failed") + l.Err(err).Msg("Policy coordinator failed and stopped") if sleep.WithContext(ctx, d) == context.Canceled { break } @@ -419,9 +419,9 @@ func runCoordinatorOutput(ctx context.Context, cord Coordinator, bulker bulk.Bul s := l.With().Int64(dl.FieldRevisionIdx, p.RevisionIdx).Int64(dl.FieldCoordinatorIdx, p.CoordinatorIdx).Logger() _, err := dl.CreatePolicy(ctx, bulker, p, dl.WithIndexName(policiesIndex)) if err != nil { - s.Err(err).Msg("failed to insert a new policy 
revision") + s.Err(err).Msg("Policy coordinator failed to add a new policy revision") } else { - s.Info().Msg("coordinator inserted a new policy revision") + s.Info().Int64("revision_id", p.RevisionIdx).Msg("Policy coordinator added a new policy revision") } case <-ctx.Done(): return @@ -434,7 +434,7 @@ func runUnenroller(ctx context.Context, bulker bulk.Bulk, policyId string, unenr Dur("checkInterval", checkInterval). Dur("unenrollTimeout", unenrollTimeout). Msg("unenroll monitor start") - defer l.Info().Msg("unenroll monitor exit") + defer l.Info().Msg("Unenroll monitor exit") t := time.NewTimer(checkInterval) defer t.Stop() diff --git a/internal/pkg/monitor/monitor.go b/internal/pkg/monitor/monitor.go index f7081bf3e..361b5ba40 100644 --- a/internal/pkg/monitor/monitor.go +++ b/internal/pkg/monitor/monitor.go @@ -186,12 +186,12 @@ func (m *simpleMonitorT) loadCheckpoint() sqn.SeqNo { // Run runs monitor. func (m *simpleMonitorT) Run(ctx context.Context) (err error) { - m.log.Info().Msg("start") + m.log.Info().Msg("Starting index monitor") defer func() { if err == context.Canceled { err = nil } - m.log.Info().Err(err).Msg("exited") + m.log.Info().Err(err).Msg("Index monitor exited") }() defer func() { diff --git a/internal/pkg/policy/monitor.go b/internal/pkg/policy/monitor.go index ea532331a..972261340 100644 --- a/internal/pkg/policy/monitor.go +++ b/internal/pkg/policy/monitor.go @@ -306,7 +306,7 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { Logger() if newPolicy.CoordinatorIdx <= 0 { - zlog.Info().Msg("ignore policy that has not pass through coordinator") + zlog.Info().Str(logger.PolicyId, newPolicy.PolicyId).Msg("Ignore policy that has not passed through coordinator") return false } @@ -320,7 +320,7 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { head: makeHead(), } m.policies[newPolicy.PolicyId] = p - zlog.Info().Msg("new policy added on update") + zlog.Info().Str(logger.PolicyId, newPolicy.PolicyId).Msg("New policy found on update and added") return false } @@ -363,7 +363,8 @@ func (m *monitorT) updatePolicy(pp *ParsedPolicy) bool { Int64("oldRev", oldPolicy.RevisionIdx). Int64("oldCoord", oldPolicy.CoordinatorIdx). Int("nQueued", nQueued). - Msg("revised policy") + Str(logger.PolicyId, newPolicy.PolicyId). 
+ Msg("New revision of policy received and added to the queue") return true } diff --git a/internal/pkg/ver/check.go b/internal/pkg/ver/check.go index 3cd0ec088..e504bf727 100644 --- a/internal/pkg/ver/check.go +++ b/internal/pkg/ver/check.go @@ -57,7 +57,7 @@ func checkCompatibility(fleetVersion, esVersion string) error { Msg("failed elasticsearch version check") return ErrUnsupportedVersion } - log.Info().Str("fleet_version", fleetVersion).Str("elasticsearch_version", esVersion).Msg("versions are compatible") + log.Info().Str("fleet_version", fleetVersion).Str("elasticsearch_version", esVersion).Msg("Elasticsearch compatibility check successful") return nil } From 9e5245d7f3f9d3b293cd74065e1e0f089d1b8b1a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 12 Nov 2021 00:23:33 -0500 Subject: [PATCH 223/240] [Automation] Update elastic stack version to 7.16.0-20fe9209 for testing (#866) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index d812e5353..33c9d12cc 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-df026734-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-20fe9209-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f73d0c5b3e1b665c2e2c41e3c04f2f84e30e29f2 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 15 Nov 2021 00:21:36 -0500 Subject: [PATCH 224/240] [Automation] Update elastic stack version to 7.16.0-27f0a6d6 for testing (#871) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 33c9d12cc..2b5a128bc 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-20fe9209-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-27f0a6d6-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From fb9bddf9e518416058139203474f6b07cf38f02c Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 17 Nov 2021 00:22:15 -0500 Subject: [PATCH 225/240] [Automation] Update elastic stack version to 7.16.0-1af90c36 for testing (#885) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 2b5a128bc..1907f5761 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-27f0a6d6-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-1af90c36-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 14b0b086c8f7b3850477aa8fb3ef7d51096d0e24 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 18 Nov 2021 00:22:15 -0500 Subject: [PATCH 226/240] [Automation] Update elastic stack version to 7.16.0-705b02ff for testing (#892) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 
1907f5761..74a17e9d8 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-1af90c36-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-705b02ff-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 3609aeb637897f5708b262111b577ca8509cc68a Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 19 Nov 2021 05:53:53 -0500 Subject: [PATCH 227/240] [Automation] Update elastic stack version to 7.16.0-3a1031bc for testing (#900) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 74a17e9d8..26dc8285d 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-705b02ff-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-3a1031bc-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 7ce0a8c21e2c509dc50e91958bd458d89df10d9b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 24 Nov 2021 00:21:25 -0500 Subject: [PATCH 228/240] [Automation] Update elastic stack version to 7.16.0-8c304116 for testing (#912) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 26dc8285d..ad6ea032a 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-3a1031bc-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-8c304116-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 16259750ad5d9a964f7d96bd9d46afa8028f0ab0 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 25 Nov 2021 00:21:46 -0500 Subject: [PATCH 229/240] [Automation] Update elastic stack version to 7.16.0-5227689d for testing (#917) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index ad6ea032a..906edfbe1 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-8c304116-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-5227689d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From f1918b3ea75b0257be5feb00d383e26cfc33bf13 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 26 Nov 2021 00:21:51 -0500 Subject: [PATCH 230/240] [Automation] Update elastic stack version to 7.16.0-58c0cae7 for testing (#922) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 906edfbe1..7b4c39e7d 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-5227689d-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-58c0cae7-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 
ae84148d8965e3adca7e1dc90d17d41d4e002f0d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 29 Nov 2021 00:23:06 -0500 Subject: [PATCH 231/240] [Automation] Update elastic stack version to 7.16.0-19916e4d for testing (#928) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index 7b4c39e7d..dbb9a2088 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-58c0cae7-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-19916e4d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 3d19317ca6f1351ce52cd8b33d4502ec7183802b Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 30 Nov 2021 00:22:10 -0500 Subject: [PATCH 232/240] [Automation] Update elastic stack version to 7.16.0-4a83af8d for testing (#932) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index dbb9a2088..c1540c64f 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-19916e4d-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-4a83af8d-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From dd5873c64d90ff35f983f5690f383487d566af5e Mon Sep 17 00:00:00 2001 From: "mergify[bot]" <37929162+mergify[bot]@users.noreply.github.com> Date: Tue, 30 Nov 2021 18:28:51 +0000 Subject: [PATCH 233/240] [7.16](backport #883) Retry reads when ES unavailable (#937) * keep trucking on ES availability errors; more tests to come (cherry picked from commit 7fb01389fadc239545f35ded4c67e9d80a00fddf) * don't attempt to distinguish between errors, just keep retrying (cherry picked from commit 2c75552ac3f97088f2fd84f5c6bfd4bbe0448e36) * move error blackholing up the stack so the monitor will never crash, added additional logging (cherry picked from commit f5fead9d8446743c521446bd038d648f8c60c42d) * pr feedback (cherry picked from commit 1886dc57f3f1ccaf21b704ae49743079184b9c0b) * upped logging level, properly wrapped errors (cherry picked from commit 97524dca4241d34f877a9d83d2b1a1c9198eaeeb) Co-authored-by: bryan --- internal/pkg/coordinator/monitor.go | 14 +++++++++----- 1 file changed, 9 insertions(+), 5 deletions(-) diff --git a/internal/pkg/coordinator/monitor.go b/internal/pkg/coordinator/monitor.go index 66c934154..03019ac71 100644 --- a/internal/pkg/coordinator/monitor.go +++ b/internal/pkg/coordinator/monitor.go @@ -7,6 +7,7 @@ package coordinator import ( "context" "errors" + "fmt" "net" "os" "runtime" @@ -132,7 +133,7 @@ func (m *monitorT) Run(ctx context.Context) (err error) { case hits := <-s.Output(): err = m.handlePolicies(ctx, hits) if err != nil { - return err + m.log.Warn().Err(err).Msgf("Encountered an error while policy leadership changes; continuing to retry.") } case <-mT.C: m.calcMetadata() @@ -140,7 +141,7 @@ func (m *monitorT) Run(ctx context.Context) (err error) { case <-lT.C: err = m.ensureLeadership(ctx) if err != nil { - return err + m.log.Warn().Err(err).Msgf("Encountered an error while checking/assigning policy leaders; continuing to retry.") } lT.Reset(m.checkInterval) case <-ctx.Done(): @@ -157,6 
+158,7 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { var policy model.Policy err := hit.Unmarshal(&policy) if err != nil { + m.log.Debug().Err(err).Msg("Failed to deserialize policy json") return err } if policy.CoordinatorIdx != 0 { @@ -170,6 +172,7 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { // current leader send to its coordinator err = p.cord.Update(ctx, policy) if err != nil { + m.log.Info().Err(err).Msg("Failed to update policy leader") return err } } @@ -192,8 +195,9 @@ func (m *monitorT) handlePolicies(ctx context.Context, hits []es.HitT) error { func (m *monitorT) ensureLeadership(ctx context.Context) error { m.log.Debug().Msg("ensuring leadership of policies") err := dl.EnsureServer(ctx, m.bulker, m.version, m.agentMetadata, m.hostMetadata, dl.WithIndexName(m.serversIndex)) + if err != nil { - return err + return fmt.Errorf("Failed to check server status on Elasticsearch (%s): %w", m.hostMetadata.Name, err) } // fetch current policies and leaders @@ -204,7 +208,7 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { m.log.Debug().Str("index", m.policiesIndex).Msg(es.ErrIndexNotFound.Error()) return nil } - return err + return fmt.Errorf("Encountered error while querying policies: %w", err) } if len(policies) > 0 { ids := make([]string, len(policies)) @@ -214,7 +218,7 @@ func (m *monitorT) ensureLeadership(ctx context.Context) error { leaders, err = dl.SearchPolicyLeaders(ctx, m.bulker, ids, dl.WithIndexName(m.leadersIndex)) if err != nil { if !errors.Is(err, es.ErrIndexNotFound) { - return err + return fmt.Errorf("Encountered error while fetching policy leaders: %w", err) } } } From 5d4693191d744acb37dc72ddc605e5c2bfbe7a7f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Wed, 1 Dec 2021 00:21:51 -0500 Subject: [PATCH 234/240] [Automation] Update elastic stack version to 7.16.0-046aeba5 for testing (#941) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index c1540c64f..ac990f67e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-4a83af8d-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-046aeba5-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 42301f06b291b3517515028a3d5a1de6af005026 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Thu, 2 Dec 2021 00:23:04 -0500 Subject: [PATCH 235/240] [Automation] Update elastic stack version to 7.16.0-80ab1dd0 for testing (#947) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index ac990f67e..cf06b762a 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-046aeba5-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-80ab1dd0-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 9290834ad8d618b3bec7d17b2ace4000e851a80d Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Fri, 3 Dec 2021 00:21:40 -0500 Subject: [PATCH 236/240] [Automation] Update elastic stack version to 
7.16.0-f17274e7 for testing (#950) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index cf06b762a..adaa6e609 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-80ab1dd0-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-f17274e7-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 847e854c9639d8ffb9aafa566cc8b9742f62103f Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Mon, 6 Dec 2021 00:22:13 -0500 Subject: [PATCH 237/240] [Automation] Update elastic stack version to 7.16.0-ef210289 for testing (#956) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index adaa6e609..a3ce539f9 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-f17274e7-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-ef210289-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From ce3abb65ee2fb15e1c9c41ed82019040227a6608 Mon Sep 17 00:00:00 2001 From: apmmachine <58790750+apmmachine@users.noreply.github.com> Date: Tue, 7 Dec 2021 00:21:35 -0500 Subject: [PATCH 238/240] [Automation] Update elastic stack version to 7.16.0-f2941f42 for testing (#961) Co-authored-by: apmmachine --- dev-tools/integration/.env | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dev-tools/integration/.env b/dev-tools/integration/.env index a3ce539f9..c6317a44e 100644 --- a/dev-tools/integration/.env +++ b/dev-tools/integration/.env @@ -1,4 +1,4 @@ -ELASTICSEARCH_VERSION=7.16.0-ef210289-SNAPSHOT +ELASTICSEARCH_VERSION=7.16.0-f2941f42-SNAPSHOT ELASTICSEARCH_USERNAME=elastic ELASTICSEARCH_PASSWORD=changeme TEST_ELASTICSEARCH_HOSTS=localhost:9200 \ No newline at end of file From 0cabcd814cd28c5964ef5c33782e612aa97fe928 Mon Sep 17 00:00:00 2001 From: Andres Rodriguez Date: Tue, 7 Dec 2021 16:03:16 +0100 Subject: [PATCH 239/240] Bump version to 7.16.1 (#954) --- main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/main.go b/main.go index 27330d702..d2451366a 100644 --- a/main.go +++ b/main.go @@ -17,7 +17,7 @@ import ( "github.com/elastic/fleet-server/v7/internal/pkg/build" ) -const defaultVersion = "7.16.0" +const defaultVersion = "7.16.1" var ( Version string = defaultVersion From 76ea93883d94977d625ab02415d583a1d39007c9 Mon Sep 17 00:00:00 2001 From: Michal Pristas Date: Tue, 23 Nov 2021 09:12:56 +0100 Subject: [PATCH 240/240] merge conflicts resolver --- cmd/fleet/server.go | 131 +++++++++++++++++------------ internal/pkg/config/config_test.go | 5 +- internal/pkg/config/input.go | 33 +++++++- 3 files changed, 109 insertions(+), 60 deletions(-) diff --git a/cmd/fleet/server.go b/cmd/fleet/server.go index 98ad61630..9fba82e1d 100644 --- a/cmd/fleet/server.go +++ b/cmd/fleet/server.go @@ -41,7 +41,7 @@ func diagConn(c net.Conn, s http.ConnState) { func runServer(ctx context.Context, router *httprouter.Router, cfg *config.Server) error { - addr := cfg.BindAddress() + listeners := cfg.BindEndpoints() rdto := cfg.Timeouts.Read wrto := cfg.Timeouts.Write idle := cfg.Timeouts.Idle @@ -49,75 +49,94 @@ func runServer(ctx 
context.Context, router *httprouter.Router, cfg *config.Serve mhbz := cfg.Limits.MaxHeaderByteSize bctx := func(net.Listener) context.Context { return ctx } - log.Info(). - Str("bind", addr). - Dur("rdTimeout", rdto). - Dur("wrTimeout", wrto). - Msg("server listening") - - server := http.Server{ - Addr: addr, - ReadTimeout: rdto, - WriteTimeout: wrto, - IdleTimeout: idle, - ReadHeaderTimeout: rdhr, - Handler: router, - BaseContext: bctx, - ConnState: diagConn, - MaxHeaderBytes: mhbz, - ErrorLog: errLogger(), - } + errChan := make(chan error) + cancelCtx, cancel := context.WithCancel(ctx) + defer cancel() - forceCh := make(chan struct{}) - defer close(forceCh) - - // handler to close server - go func() { - select { - case <-ctx.Done(): - log.Debug().Msg("force server close on ctx.Done()") - server.Close() - case <-forceCh: - log.Debug().Msg("go routine forced closed on exit") + for _, addr := range listeners { + log.Info(). + Str("bind", addr). + Dur("rdTimeout", rdto). + Dur("wrTimeout", wrto). + Msg("server listening") + + server := http.Server{ + Addr: addr, + ReadTimeout: rdto, + WriteTimeout: wrto, + IdleTimeout: idle, + ReadHeaderTimeout: rdhr, + Handler: router, + BaseContext: bctx, + ConnState: diagConn, + MaxHeaderBytes: mhbz, + ErrorLog: errLogger(), } - }() - var listenCfg net.ListenConfig - - ln, err := listenCfg.Listen(ctx, "tcp", addr) - if err != nil { - return err - } + forceCh := make(chan struct{}) + defer close(forceCh) - // Bind the deferred Close() to the stack variable to handle case where 'ln' is wrapped - defer func() { ln.Close() }() + // handler to close server + go func() { + select { + case <-ctx.Done(): + log.Debug().Msg("force server close on ctx.Done()") + server.Close() + case <-forceCh: + log.Debug().Msg("go routine forced closed on exit") + } + }() - // Conn Limiter must be before the TLS handshake in the stack; - // The server should not eat the cost of the handshake if there - // is no capacity to service the connection. - // Also, it appears the HTTP2 implementation depends on the tls.Listener - // being at the top of the stack. - ln = wrapConnLimitter(ctx, ln, cfg) + var listenCfg net.ListenConfig - if cfg.TLS != nil && cfg.TLS.IsEnabled() { - commonTlsCfg, err := tlscommon.LoadTLSServerConfig(cfg.TLS) + ln, err := listenCfg.Listen(ctx, "tcp", addr) if err != nil { return err } - server.TLSConfig = commonTlsCfg.ToConfig() - // Must enable http/2 in the configuration explicitly. - // (see https://golang.org/pkg/net/http/#Server.Serve) - server.TLSConfig.NextProtos = []string{"h2", "http/1.1"} + // Bind the deferred Close() to the stack variable to handle case where 'ln' is wrapped + defer func() { ln.Close() }() - ln = tls.NewListener(ln, server.TLSConfig) + // Conn Limiter must be before the TLS handshake in the stack; + // The server should not eat the cost of the handshake if there + // is no capacity to service the connection. + // Also, it appears the HTTP2 implementation depends on the tls.Listener + // being at the top of the stack. + ln = wrapConnLimitter(ctx, ln, cfg) + + if cfg.TLS != nil && cfg.TLS.IsEnabled() { + commonTlsCfg, err := tlscommon.LoadTLSServerConfig(cfg.TLS) + if err != nil { + return err + } + server.TLSConfig = commonTlsCfg.ToConfig() + + // Must enable http/2 in the configuration explicitly. 
+ // (see https://golang.org/pkg/net/http/#Server.Serve) + server.TLSConfig.NextProtos = []string{"h2", "http/1.1"} + + ln = tls.NewListener(ln, server.TLSConfig) + + } else { + log.Warn().Msg("Exposed over insecure HTTP; enablement of TLS is strongly recommended") + } + + log.Debug().Msgf("Listening on %s", addr) + + go func(ctx context.Context, errChan chan error, ln net.Listener) { + if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { + errChan <- err + } + }(cancelCtx, errChan, ln) - } else { - log.Warn().Msg("exposed over insecure HTTP; enablement of TLS is strongly recommended") } - if err := server.Serve(ln); err != nil && err != http.ErrServerClosed { - return err + select { + case err := <-errChan: + if err != context.Canceled { + return err + } + case <-cancelCtx.Done(): } return nil diff --git a/internal/pkg/config/config_test.go b/internal/pkg/config/config_test.go index bfffcd9da..4c7b5c1c9 100644 --- a/internal/pkg/config/config_test.go +++ b/internal/pkg/config/config_test.go @@ -103,8 +103,9 @@ func TestConfig(t *testing.T) { { Type: "fleet-server", Server: Server{ - Host: "localhost", - Port: 8888, + Host: "localhost", + Port: 8888, + InternalPort: 8221, Timeouts: ServerTimeouts{ Read: 20 * time.Second, ReadHeader: 5 * time.Second, diff --git a/internal/pkg/config/input.go b/internal/pkg/config/input.go index b4238fb3e..7124e430b 100644 --- a/internal/pkg/config/input.go +++ b/internal/pkg/config/input.go @@ -15,6 +15,8 @@ import ( const kDefaultHost = "0.0.0.0" const kDefaultPort = 8220 +const kDefaultInternalHost = "localhost" +const kDefaultInternalPort = 8221 // Policy is the configuration policy to use. type Policy struct { @@ -57,6 +59,7 @@ func (c *ServerBulk) InitDefaults() { type Server struct { Host string `config:"host"` Port uint16 `config:"port"` + InternalPort uint16 `config:"internal_port"` TLS *tlscommon.ServerConfig `config:"ssl"` Timeouts ServerTimeouts `config:"timeouts"` Profiler ServerProfiler `config:"profiler"` @@ -71,6 +74,7 @@ type Server struct { func (c *Server) InitDefaults() { c.Host = kDefaultHost c.Port = kDefaultPort + c.InternalPort = kDefaultInternalPort c.Timeouts.InitDefaults() c.CompressionLevel = flate.BestSpeed c.CompressionThresh = 1024 @@ -80,13 +84,38 @@ func (c *Server) InitDefaults() { c.Bulk.InitDefaults() } +// BindEndpoints returns the binding address for the all HTTP server listeners. +func (c *Server) BindEndpoints() []string { + primaryAddress := c.BindAddress() + endpoints := make([]string, 0, 2) + endpoints = append(endpoints, primaryAddress) + + if internalAddress := c.BindInternalAddress(); internalAddress != "" && internalAddress != ":0" && internalAddress != primaryAddress { + endpoints = append(endpoints, internalAddress) + } + + return endpoints +} + // BindAddress returns the binding address for the HTTP server. func (c *Server) BindAddress() string { - host := c.Host + return bindAddress(c.Host, c.Port) +} + +// BindInternalAddress returns the binding address for the internal HTTP server. +func (c *Server) BindInternalAddress() string { + if c.InternalPort <= 0 { + return bindAddress(kDefaultInternalHost, kDefaultInternalPort) + } + + return bindAddress(kDefaultInternalHost, c.InternalPort) +} + +func bindAddress(host string, port uint16) string { if strings.Count(host, ":") > 1 && strings.Count(host, "]") == 0 { host = "[" + host + "]" } - return fmt.Sprintf("%s:%d", host, c.Port) + return fmt.Sprintf("%s:%d", host, port) } // Input is the input defined by Agent to run Fleet Server.
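For reference, a minimal, self-contained sketch of how the bind-address helpers introduced in the last patch compose. It mirrors the `bindAddress` and `BindEndpoints` logic and the default host/port constants from the `internal/pkg/config/input.go` hunk above; the `package main` wrapper, lower-case names, and the example values are illustrative assumptions and are not part of the patch itself.

```go
package main

import (
	"fmt"
	"strings"
)

// Defaults mirroring kDefaultHost/kDefaultPort and the new internal
// listener defaults added by the patch.
const (
	defaultHost         = "0.0.0.0"
	defaultPort         = 8220
	defaultInternalHost = "localhost"
	defaultInternalPort = 8221
)

// server is a reduced stand-in for config.Server with only the fields
// relevant to address binding.
type server struct {
	Host         string
	Port         uint16
	InternalPort uint16
}

// bindAddress joins host and port, bracketing bare IPv6 literals so the
// resulting string is a valid listen address.
func bindAddress(host string, port uint16) string {
	if strings.Count(host, ":") > 1 && strings.Count(host, "]") == 0 {
		host = "[" + host + "]"
	}
	return fmt.Sprintf("%s:%d", host, port)
}

// bindEndpoints returns the external endpoint plus the internal one,
// skipping the internal endpoint when it would duplicate the external.
func (s server) bindEndpoints() []string {
	primary := bindAddress(s.Host, s.Port)
	endpoints := []string{primary}

	internalPort := s.InternalPort
	if internalPort == 0 {
		internalPort = defaultInternalPort
	}
	internal := bindAddress(defaultInternalHost, internalPort)
	if internal != "" && internal != ":0" && internal != primary {
		endpoints = append(endpoints, internal)
	}
	return endpoints
}

func main() {
	cfg := server{Host: defaultHost, Port: defaultPort, InternalPort: defaultInternalPort}

	// With the defaults, runServer would start one listener per endpoint:
	// the public API on 0.0.0.0:8220 and the internal one on localhost:8221.
	fmt.Println(cfg.bindEndpoints()) // [0.0.0.0:8220 localhost:8221]

	// IPv6 hosts are bracketed before the port is appended.
	fmt.Println(bindAddress("::1", 8220)) // [::1]:8220
}
```

This is why the reworked `runServer` in the same patch loops over `cfg.BindEndpoints()` and creates a separate `http.Server` and listener per address instead of binding a single `cfg.BindAddress()`.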