diff --git a/.circleci/config.yml b/.circleci/config.yml index 6a98bb4de..b6a1f200c 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -14,9 +14,6 @@ executors: - image: tendermintdev/docker-website-deployment environment: AWS_REGION: us-east-1 - protoc: - docker: - - image: tendermintdev/docker-protoc commands: checkout_with_submodules: @@ -95,33 +92,6 @@ jobs: root: "/tmp/bin" paths: - "." - proto-lint: - executor: protoc - steps: - - checkout - - run: - command: make proto-lint - - test_abci_apps: - executor: golang - steps: - - run_test: - script_path: abci/tests/test_app/test.sh - - # if this test fails, fix it and update the docs at: - # https://github.com/tendermint/tendermint/blob/master/docs/abci-cli.md - test_abci_cli: - executor: golang - steps: - - run_test: - script_path: abci/tests/test_cli/test.sh - - test_apps: - executor: golang - resource_class: large - steps: - - run_test: - script_path: test/app/test.sh test_persistence: executor: golang @@ -132,6 +102,7 @@ jobs: test_cover: executor: golang + resource_class: large parallelism: 4 steps: - restore_cache: @@ -143,7 +114,6 @@ jobs: name: "Restore go module cache" keys: - go-mod-v2-{{ checksum "go.sum" }} -# - make_libsodium - run: name: "Run tests" command: | @@ -369,31 +339,33 @@ jobs: - store_artifacts: path: /go/src/github.com/tendermint/tendermint/tendermint-*.tar.gz - # # Test RPC implementation against the swagger documented specs - # contract_tests: - # working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint - # machine: - # image: circleci/classic:latest - # environment: - # GOBIN: /home/circleci/.go_workspace/bin - # GOPATH: /home/circleci/.go_workspace/ - # GOOS: linux - # GOARCH: amd64 - # parallelism: 1 - # steps: - # - checkout_with_submodules - # - run: - # name: Test RPC endpoints against swagger documentation - # command: | - # set -x - # export PATH=~/.local/bin:$PATH - # # install node and dredd - # ./scripts/get_nodejs.sh - # # 
build the binaries with a proper version of Go - # docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang make build-linux build-contract-tests-hooks - # # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use - # go get github.com/snikch/goodman/cmd/goodman - # make contract-tests + contract_tests: + working_directory: /home/circleci/.go_workspace/src/github.com/tendermint/tendermint + machine: + image: circleci/classic:latest + environment: + GOBIN: /home/circleci/.go_workspace/bin + GOPATH: /home/circleci/.go_workspace/ + GOOS: linux + GOARCH: amd64 + parallelism: 1 + steps: + - checkout_with_submodules + - run: + name: Test RPC endpoints against swagger documentation + command: | + set -x + export PATH=~/.local/bin:$PATH + # install node and dredd + ./scripts/get_nodejs.sh + # build the binaries with a proper version of Go + # Build Tendermint + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint golang:1.14.1-alpine /bin/sh -c "apk add --update make git && make build-linux" + # Build contract-tests + docker run --rm -v "$PWD":/go/src/github.com/tendermint/tendermint -w /go/src/github.com/tendermint/tendermint ubuntu:20.10 ./scripts/prepare_dredd_test.sh + # This docker image works with go 1.7, we can install here the hook handler that contract-tests is going to use + go get github.com/snikch/goodman/cmd/goodman + make contract-tests workflows: version: 2 @@ -413,19 +385,9 @@ workflows: filters: branches: only: - - docs-theme-latest + - docs-staging - lint - setup_dependencies - - test_abci_apps: - requires: - - setup_dependencies - - proto-lint - - test_abci_cli: - requires: - - setup_dependencies - - test_apps: - requires: - - setup_dependencies - test_cover: requires: - setup_dependencies @@ -435,7 +397,6 @@ workflows: - localnet: requires: - 
setup_dependencies -# - test_p2p - test_p2p: name: test_p2p_ipv6 ipv: 6 @@ -445,9 +406,9 @@ workflows: only: - master - /v[0-9]+\.[0-9]+/ - # - contract_tests: - # requires: - # - setup_dependencies + - contract_tests: + requires: + - setup_dependencies # release: # jobs: diff --git a/.github/workflows/action.yml b/.github/workflows/action.yml deleted file mode 100644 index 7c240480a..000000000 --- a/.github/workflows/action.yml +++ /dev/null @@ -1,10 +0,0 @@ -#name: Check Markdown links -#on: push -#jobs: -# markdown-link-check: -# runs-on: ubuntu-latest -# steps: -# - uses: actions/checkout@master -# - uses: gaurav-nelson/github-action-markdown-link-check@0.6.0 -# with: -# folder-path: "docs" diff --git a/.github/workflows/linkchecker.yml b/.github/workflows/linkchecker.yml new file mode 100644 index 000000000..9f1092df9 --- /dev/null +++ b/.github/workflows/linkchecker.yml @@ -0,0 +1,12 @@ +name: Check Markdown links +on: + schedule: + - cron: '* */24 * * *' +jobs: + markdown-link-check: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - uses: gaurav-nelson/github-action-markdown-link-check@0.6.0 + with: + folder-path: "docs" diff --git a/.github/workflows/lint.yaml b/.github/workflows/lint.yaml new file mode 100644 index 000000000..de3126982 --- /dev/null +++ b/.github/workflows/lint.yaml @@ -0,0 +1,12 @@ +name: Lint +on: [pull_request] +jobs: + golangci-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: golangci-lint + uses: reviewdog/action-golangci-lint@v1 + with: + github_token: ${{ secrets.github_token }} + reporter: github-pr-review diff --git a/.github/workflows/proto.yml b/.github/workflows/proto.yml new file mode 100644 index 000000000..ca30d0291 --- /dev/null +++ b/.github/workflows/proto.yml @@ -0,0 +1,12 @@ +name: Proto check +on: [pull_request] +jobs: + proto-checks: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - uses: docker-practice/actions-setup-docker@master + - 
name: lint + run: make proto-lint +# - name: check-breakage +# run: make proto-check-breaking-ci diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml new file mode 100644 index 000000000..75ca8b4a0 --- /dev/null +++ b/.github/workflows/tests.yml @@ -0,0 +1,82 @@ +name: Tests +on: + pull_request: + push: + branches: + - master + - release/** + +jobs: + cleanup-runs: + runs-on: ubuntu-latest + steps: + - uses: rokroskar/workflow-run-cleanup-action@master + env: + GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}" + if: "!startsWith(github.ref, 'refs/tags/') && github.ref != 'refs/heads/master'" + build: + name: Build + runs-on: ubuntu-latest + steps: + - uses: actions/setup-go@v2-beta + - name: Set GOBIN + run: | + echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/checkout@v2 + - name: install + run: make install install_abci + # Cache bin + - uses: actions/cache@v1 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-tm-binary + + test_abci_apps: + runs-on: ubuntu-latest + needs: Build + steps: + - uses: actions/setup-go@v2-beta + - name: Set GOBIN + run: | + echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-tm-binary + - name: test_abci_apps + run: abci/tests/test_app/test.sh + shell: bash + + test_abci_cli: + runs-on: ubuntu-latest + needs: Build + steps: + - uses: actions/setup-go@v2-beta + - name: Set GOBIN + run: | + echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-tm-binary + - run: abci/tests/test_cli/test.sh + shell: bash + + test_apps: + runs-on: ubuntu-latest + needs: Build + steps: + - uses: actions/setup-go@v2-beta + - name: Set GOBIN + run: | + echo "::add-path::$(go env GOPATH)/bin" + - uses: actions/checkout@v2 + - uses: actions/cache@v1 + with: + path: ~/go/bin + key: ${{ runner.os }}-go-tm-binary + - name: test_apps + run: 
test/app/test.sh + shell: bash diff --git a/.mergify.yml b/.mergify.yml index 136bb1148..b1bef433d 100644 --- a/.mergify.yml +++ b/.mergify.yml @@ -1,5 +1,5 @@ pull_request_rules: - - name: automerge to master with label S:automerge and branch protection passing + - name: Automerge to master conditions: - base=master - label=S:automerge @@ -7,3 +7,4 @@ pull_request_rules: merge: method: squash strict: true + commit_message: title+body diff --git a/CHANGELOG.md b/CHANGELOG.md index 49968ad2d..156f397c2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Changelog +## v0.2 +* Changed from the consensus way which the entire validator agrees to a part of the validators is elected as a voter to consensus. +The selected validator is called `voter` +* Base Tendermint version is v0.33.4. please see the [CHANGELOGS](./CHANGELOG_OF_TENDERMINT.md#v0.33.4) of the Tendermint. + +### BREAKING CHANGES: + +- State + - [state] [\#92](https://github.com/line/tendermint/pull/92) Add `VoterParams` to Genesis state + +- Go API + - [types] [\#83](https://github.com/line/tendermint/pull/83) Add `StakingPower` to `Validator` + - [consensus] [\#83](https://github.com/line/tendermint/pull/83) Change calculation of `VotingPower` + +### FEATURES: +- [rpc] [\#78](https://github.com/line/tendermint/pull/78) Add `Voters` rpc +- [consensus] [\#83](https://github.com/line/tendermint/pull/83) Selection voters using random sampling without replacement +- [consensus] [\#92](https://github.com/line/tendermint/pull/92) Apply calculation of voter count + +### BUG FIXES: +- [circleCI] [\#76](https://github.com/line/tendermint/pull/76) Fix contract test job of circleCI + + + ## v0.1 Base Tendermint v0.33.3. 
please see the [CHANGELOG](./CHANGELOG_OF_TENDERMINT.md#v0.33.3) diff --git a/CHANGELOG_OF_TENDERMINT.md b/CHANGELOG_OF_TENDERMINT.md index 5c860ad67..1bf8d8ec6 100644 --- a/CHANGELOG_OF_TENDERMINT.md +++ b/CHANGELOG_OF_TENDERMINT.md @@ -1,5 +1,49 @@ # Changelog +## v0.33.4 + +- Nodes are no longer guaranteed to contain all blocks up to the latest height. The ABCI app can now control which blocks to retain through the ABCI field `ResponseCommit.retain_height`, all blocks and associated data below this height will be removed. + +*April 21, 2020* + +Special thanks to external contributors on this release: @whylee259, @greg-szabo + +Friendly reminder, we have a [bug bounty program](https://hackerone.com/tendermint). + +### BREAKING CHANGES: + +- Go API + + - [lite2] [\#4616](https://github.com/tendermint/tendermint/pull/4616) Make `maxClockDrift` an option `Verify/VerifyAdjacent/VerifyNonAdjacent` now accept `maxClockDrift time.Duration` (@melekes). + - [rpc/client] [\#4628](https://github.com/tendermint/tendermint/pull/4628) Split out HTTP and local clients into `http` and `local` packages (@erikgrinaker). + +### FEATURES: + +- [abci] [\#4588](https://github.com/tendermint/tendermint/issues/4588) Add `ResponseCommit.retain_height` field, which will automatically remove blocks below this height. This bumps the ABCI version to 0.16.2 (@erikgrinaker). +- [cmd] [\#4665](https://github.com/tendermint/tendermint/pull/4665) New `tendermint completion` command to generate Bash/Zsh completion scripts (@alessio). +- [rpc] [\#4588](https://github.com/tendermint/tendermint/issues/4588) Add `/status` response fields for the earliest block available on the node (@erikgrinaker). +- [rpc] [\#4611](https://github.com/tendermint/tendermint/pull/4611) Add `codespace` to `ResultBroadcastTx` (@whylee259). + +### IMPROVEMENTS: + +- [all] [\#4608](https://github.com/tendermint/tendermint/pull/4608) Give reactors descriptive names when they're initialized (@tessr). 
+- [blockchain] [\#4588](https://github.com/tendermint/tendermint/issues/4588) Add `Base` to blockchain reactor P2P messages `StatusRequest` and `StatusResponse` (@erikgrinaker). +- [Docker] [\#4569](https://github.com/tendermint/tendermint/issues/4569) Default configuration added to docker image (you can still mount your own config the same way) (@greg-szabo). +- [example/kvstore] [\#4588](https://github.com/tendermint/tendermint/issues/4588) Add `RetainBlocks` option to control block retention (@erikgrinaker). +- [evidence] [\#4632](https://github.com/tendermint/tendermint/pull/4632) Inbound evidence checked if already existing (@cmwaters). +- [lite2] [\#4575](https://github.com/tendermint/tendermint/pull/4575) Use bisection for within-range verification (@cmwaters). +- [lite2] [\#4562](https://github.com/tendermint/tendermint/pull/4562) Cache headers when using bisection (@cmwaters). +- [p2p] [\#4548](https://github.com/tendermint/tendermint/pull/4548) Add ban list to address book (@cmwaters). +- [privval] [\#4534](https://github.com/tendermint/tendermint/issues/4534) Add `error` as a return value on`GetPubKey()` (@marbar3778). +- [p2p] [\#4621](https://github.com/tendermint/tendermint/issues/4621) Ban peers when messages are unsolicited or too frequent (@cmwaters). +- [rpc] [\#4703](https://github.com/tendermint/tendermint/pull/4703) Add `count` and `total` to `/validators` response (@melekes). +- [tools] [\#4615](https://github.com/tendermint/tendermint/issues/4615) Allow developers to use Docker to generate proto stubs, via `make proto-gen-docker` (@erikgrinaker). + +### BUG FIXES: + +- [rpc] [\#4568](https://github.com/tendermint/tendermint/issues/4568) Fix panic when `Subscribe` is called, but HTTP client is not running. `Subscribe`, `Unsubscribe(All)` methods return an error now (@melekes). + + ## v0.33.3 *April 6, 2020* @@ -208,7 +252,7 @@ subjectivity interface. 
Refer to the [spec](https://github.com/tendermint/spec/b - Apps - - [tm-bench] Removed tm-bench in favor of [tm-load-test](https://github.com/interchainio/tm-load-test) + - [tm-bench] Removed tm-bench in favor of [tm-load-test](https://github.com/informalsystems/tm-load-test) - Go API @@ -295,6 +339,67 @@ subjectivity interface. Refer to the [spec](https://github.com/tendermint/spec/b - [consensus/types] [\#4243](https://github.com/tendermint/tendermint/issues/4243) fix BenchmarkRoundStateDeepCopy panics (@cuonglm) - [rpc] [\#4256](https://github.com/tendermint/tendermint/issues/4256) Pass `outCapacity` to `eventBus#Subscribe` when subscribing using a local client +## v0.32.10 + +*April 6, 2020* + +This security release fixes: + +### Denial of Service 1 + +Tendermint 0.33.2 and earlier does not limit the number of P2P connection +requests. For each p2p connection, Tendermint allocates ~0.5MB. Even though +this memory is garbage collected once the connection is terminated (due to +duplicate IP or reaching a maximum number of inbound peers), temporary memory +spikes can lead to OOM (Out-Of-Memory) exceptions. + +Tendermint 0.33.3 (and 0.32.10) limits the total number of P2P incoming +connection requests to to `p2p.max_num_inbound_peers + +len(p2p.unconditional_peer_ids)`. + +Notes: + +- Tendermint does not rate limit P2P connection requests per IP (an attacker + can saturate all the inbound slots); +- Tendermint does not rate limit HTTP(S) requests. If you expose any RPC + endpoints to the public, please make sure to put in place some protection + (https://www.nginx.com/blog/rate-limiting-nginx/). We may implement this in + the future ([\#1696](https://github.com/tendermint/tendermint/issues/1696)). + +### Denial of Service 2 + +Tendermint 0.33.2 and earlier does not reclaim `activeID` of a peer after it's +removed in `Mempool` reactor. This does not happen all the time. 
It only +happens when a connection fails (for any reason) before the Peer is created and +added to all reactors. `RemovePeer` is therefore called before `AddPeer`, which +leads to always growing memory (`activeIDs` map). The `activeIDs` map has a +maximum size of 65535 and the node will panic if this map reaches the maximum. +An attacker can create a lot of connection attempts (exploiting Denial of +Service 1), which ultimately will lead to the node panicking. + +Tendermint 0.33.3 (and 0.32.10) claims `activeID` for a peer in `InitPeer`, +which is executed before `MConnection` is started. + +Notes: + +- `InitPeer` function was added to all reactors to combat a similar issue - + [\#3338](https://github.com/tendermint/tendermint/issues/3338); +- Denial of Service 2 is independent of Denial of Service 1 and can be executed + without it. + +**All clients are recommended to upgrade** + +Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding +and reporting this. + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). + +### SECURITY: + +- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr) +- [p2p] Limit the number of incoming connections (@melekes) + ## v0.32.9 _January, 9, 2020_ @@ -626,6 +731,69 @@ program](https://hackerone.com/tendermint). - [node] [\#3716](https://github.com/tendermint/tendermint/issues/3716) Fix a bug where `nil` is recorded as node's address - [node] [\#3741](https://github.com/tendermint/tendermint/issues/3741) Fix profiler blocking the entire node +*Tendermint 0.31 release series has reached End-Of-Life and is no longer supported.* + +## v0.31.12 + +*April 6, 2020* + +This security release fixes: + +### Denial of Service 1 + +Tendermint 0.33.2 and earlier does not limit the number of P2P connection requests. +For each p2p connection, Tendermint allocates ~0.5MB. 
Even though this +memory is garbage collected once the connection is terminated (due to duplicate +IP or reaching a maximum number of inbound peers), temporary memory spikes can +lead to OOM (Out-Of-Memory) exceptions. + +Tendermint 0.33.3, 0.32.10, and 0.31.12 limit the total number of P2P incoming +connection requests to to `p2p.max_num_inbound_peers + +len(p2p.unconditional_peer_ids)`. + +Notes: + +- Tendermint does not rate limit P2P connection requests per IP (an attacker + can saturate all the inbound slots); +- Tendermint does not rate limit HTTP(S) requests. If you expose any RPC + endpoints to the public, please make sure to put in place some protection + (https://www.nginx.com/blog/rate-limiting-nginx/). We may implement this in + the future ([\#1696](https://github.com/tendermint/tendermint/issues/1696)). + +### Denial of Service 2 + +Tendermint 0.33.2 and earlier does not reclaim `activeID` of a peer after it's +removed in `Mempool` reactor. This does not happen all the time. It only +happens when a connection fails (for any reason) before the Peer is created and +added to all reactors. `RemovePeer` is therefore called before `AddPeer`, which +leads to always growing memory (`activeIDs` map). The `activeIDs` map has a +maximum size of 65535 and the node will panic if this map reaches the maximum. +An attacker can create a lot of connection attempts (exploiting Denial of +Service 1), which ultimately will lead to the node panicking. + +Tendermint 0.33.3, 0.32.10, and 0.31.12 claim `activeID` for a peer in `InitPeer`, +which is executed before `MConnection` is started. + +Notes: + +- `InitPeer` function was added to all reactors to combat a similar issue - + [\#3338](https://github.com/tendermint/tendermint/issues/3338); +- Denial of Service 2 is independent of Denial of Service 1 and can be executed + without it. 
+ +**All clients are recommended to upgrade** + +Special thanks to [fudongbai](https://hackerone.com/fudongbai) for finding +and reporting this. + +Friendly reminder, we have a [bug bounty +program](https://hackerone.com/tendermint). + +### SECURITY: + +- [mempool] Reserve IDs in InitPeer instead of AddPeer (@tessr) +- [p2p] Limit the number of incoming connections (@melekes) + ## v0.31.11 *October 18, 2019* diff --git a/CHANGELOG_PENDING.md b/CHANGELOG_PENDING.md index ad67141e3..32a2edbaa 100644 --- a/CHANGELOG_PENDING.md +++ b/CHANGELOG_PENDING.md @@ -1,16 +1,19 @@ -## v0.2 +## v0.3 \*\* ### BREAKING CHANGES: +- State + - CLI/RPC/Config - Apps +- P2P Protocol + - Go API -- Blockchain Protocol ### FEATURES: diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6e6897ffa..a972e9c3b 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -12,7 +12,7 @@ landing changes in master. All work on the code base should be motivated by a [Github Issue](https://github.com/tendermint/tendermint/issues). [Search](https://github.com/tendermint/tendermint/issues?q=is%3Aopen+is%3Aissue+label%3A%22help+wanted%22) -is a good place start when looking for places to contribute. If you +is a good place start when looking for places to contribute. If you would like to work on an issue which already exists, please indicate so by leaving a comment. @@ -49,8 +49,9 @@ maintainers to take a look. ![Contributing flow](./docs/imgs/contributing.png) Each stage of the process is aimed at creating feedback cycles which align contributors and maintainers to make sure: -* Contributors don’t waste their time implementing/proposing features which won’t land in master. -* Maintainers have the necessary context in order to support and review contributions. + +- Contributors don’t waste their time implementing/proposing features which won’t land in master. +- Maintainers have the necessary context in order to support and review contributions. 
## Forking @@ -102,9 +103,12 @@ specify exactly the dependency you want to update, eg. We use [Protocol Buffers](https://developers.google.com/protocol-buffers) along with [gogoproto](https://github.com/gogo/protobuf) to generate code for use across Tendermint Core. -For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will have to install the needed dependencies with `make buf`. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. +For linting and checking breaking changes, we use [buf](https://buf.build/). If you would like to run linting and check if the changes you have made are breaking then you will need to have docker running locally. Then the linting cmd will be `make proto-lint` and the breaking changes check will be `make proto-check-breaking`. + +There are two ways to generate your proto stubs. -To generate new stubs based off of your changes you can run `make proto-gen` after installing `protoc` and gogoproto. +1. Use Docker, pull an image that will generate your proto stubs with no need to install anything. `make proto-gen-docker` +2. Run `make proto-gen` after installing `protoc` and gogoproto. ### Installation Instructions @@ -186,13 +190,29 @@ easy to reference the pull request where a change was introduced. - make changes and update the `CHANGELOG_PENDING.md` to record your change - before submitting a pull request, run `git rebase` on top of the latest `master` +When you have submitted a pull request label the pull request with either `R:minor`, if the change can be accepted in a minor release, or `R:major`, if the change is meant for a major release. 
+ ### Pull Merge Procedure - ensure pull branch is based on a recent `master` - run `make test` to ensure that all tests pass -- squash merge pull request +- [squash](https://stackoverflow.com/questions/5189560/squash-my-last-x-commits-together-using-git) merge pull request - the `unstable` branch may be used to aggregate pull merges before fixing tests +### Git Commit Style + +We follow the [Go style guide on commit messages](https://tip.golang.org/doc/contribute.html#commit_messages). Write concise commits that start with the package name and have a description that finishes the sentence "This change modifies Tendermint to...". For example, + +\``` +cmd/debug: execute p.Signal only when p is not nil + +[potentially longer description in the body] + +Fixes #nnnn +\``` + +Each PR should have one commit once it lands on `master`; this can be accomplished by using the "squash and merge" button on Github. Be sure to edit your commit message, though! + ### Release Procedure #### Major Release @@ -207,20 +227,50 @@ easy to reference the pull request where a change was introduced. release, and add the github aliases of external contributors to the top of the changelog. To lookup an alias from an email, try `bash ./scripts/authors.sh ` - reset the `CHANGELOG_PENDING.md` - - bump versions + - bump the appropriate versions in `version.go` 4. push your changes with prepared release details to `vX.X` (this will trigger the release `vX.X.0`) 5. merge back to master (don't squash merge!) #### Minor Release -If there were no breaking changes and you need to create a release nonetheless, -the procedure is almost exactly like with a new release above. +Minor releases are done differently from major releases. Minor release pull requests should be labeled with `R:minor` if they are to be included. + +1. Checkout the last major release, `vX.X`. + + - `git checkout vX.X` + +2. 
Create a release candidate branch off the most recent major release with your upcoming version specified, `rc1/vX.X.x`, and push the branch. + + - `git checkout -b rc1/vX.X.x` + - `git push -u origin rc1/vX.X.x` + +3. Create a cherry-picking branch, and make a pull request into the release candidate. + + - `git checkout -b cherry-picks/rc1/vX.X.x` + + - This is for devs to approve the commits that are entering the release candidate. + - There may be merge conflicts. + +4. Begin cherry-picking. + + - `git cherry-pick {PR commit from master you wish to cherry pick}` + - Fix conflicts + - `git cherry-pick --continue` + - `git push cherry-picks/rc1/vX.X.x` + + > Once all commits are included and CI/tests have passed, then it is ready for a release. + +5. Create a release branch `release/vX.X.x` off the release candidate branch. + + - `git checkout -b release/vX.X.x` + - `git push -u origin release/vX.X.x` + > Note this Branch is protected once pushed, you will need admin help to make any change merges into the branch. + +6. Merge Commit the release branch into the latest major release branch `vX.X`, this will start the release process. -The only difference is that in the end you create a pull request against the existing `X.X` branch. -The branch name should match the release number you want to create. -Merging this PR will trigger the next release. -For example, if the PR is against an existing 0.34 branch which already contains a v0.34.0 release/tag, -the patch version will be incremented and the created release will be v0.34.1. +7. Create a Pull Request back to master with the CHANGELOG & version changes from the latest release. + - Remove all `R:minor` labels from the pull requests that were included in the release. + > Note: Do not merge the release branch into master. 
#### Backport Release diff --git a/DOCKER/Dockerfile b/DOCKER/Dockerfile index c3c186e05..e3a918749 100644 --- a/DOCKER/Dockerfile +++ b/DOCKER/Dockerfile @@ -22,17 +22,33 @@ RUN apk update && \ # Run the container with tmuser by default. (UID=100, GID=1000) USER tmuser -# Expose the data directory as a volume since there's mutable state in there -VOLUME [ $TMHOME ] - WORKDIR $TMHOME -# p2p and rpc port -EXPOSE 26656 26657 +# p2p, rpc and prometheus port +EXPOSE 26656 26657 26660 ENTRYPOINT ["/usr/bin/tendermint"] -CMD ["node", "--moniker=`hostname`"] +CMD ["node"] STOPSIGNAL SIGTERM ARG BINARY=tendermint COPY $BINARY /usr/bin/tendermint + +# Create default configuration for docker run. +RUN /usr/bin/tendermint init && \ + sed -i \ + -e 's/^proxy_app\s*=.*/proxy_app = "kvstore"/' \ + -e 's/^moniker\s*=.*/moniker = "dockernode"/' \ + -e 's/^addr_book_strict\s*=.*/addr_book_strict = false/' \ + -e 's/^timeout_commit\s*=.*/timeout_commit = "500ms"/' \ + -e 's/^index_all_tags\s*=.*/index_all_tags = true/' \ + -e 's,^laddr = "tcp://127.0.0.1:26657",laddr = "tcp://0.0.0.0:26657",' \ + -e 's/^prometheus\s*=.*/prometheus = true/' \ + $TMHOME/config/config.toml && \ + sed -i \ + -e 's/^\s*"chain_id":.*/ "chain_id": "dockerchain",/' \ + $TMHOME/config/genesis.json + +# Expose the data directory as a volume since there's mutable state in there +VOLUME [ $TMHOME ] + diff --git a/Makefile b/Makefile index d3d39436f..013d1255c 100644 --- a/Makefile +++ b/Makefile @@ -20,6 +20,7 @@ LIBSODIM_BUILD_TAGS='libsodium tendermint' LD_FLAGS = -X github.com/tendermint/tendermint/version.GitCommit=`git rev-parse --short=8 HEAD` -s -w BUILD_FLAGS = -mod=readonly -ldflags "$(LD_FLAGS)" HTTPS_GIT := https://github.com/tendermint/tendermint.git +DOCKER_BUF := docker run -v $(shell pwd):/workspace --workdir /workspace bufbuild/buf all: check build test install .PHONY: all @@ -68,16 +69,21 @@ proto-gen: @sh scripts/protocgen.sh .PHONY: proto-gen +proto-gen-docker: + @echo "Generating 
Protobuf files" + @docker run -v $(shell pwd):/workspace --workdir /workspace tendermintdev/docker-build-proto sh ./scripts/protocgen.sh +.PHONY: proto-gen-docker + proto-lint: - @buf check lint --error-format=json + @$(DOCKER_BUF) check lint --error-format=json .PHONY: proto-lint proto-check-breaking: - @buf check breaking --against-input ".git#branch=master" + @$(DOCKER_BUF) check breaking --against-input .git#branch=master .PHONY: proto-check-breaking proto-check-breaking-ci: - @buf check breaking --against-input "$(HTTPS_GIT)#branch=master" + @$(DOCKER_BUF) check breaking --against-input $(HTTPS_GIT)#branch=master .PHONY: proto-check-breaking-ci ############################################################################### @@ -165,9 +171,10 @@ clean_certs: ### Formatting, linting, and vetting ### ############################################################################### -fmt: - @go fmt ./... -.PHONY: fmt +format: + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs gofmt -w -s + find . -name '*.go' -type f -not -path "*.git*" -not -name '*.pb.go' -not -name '*pb_test.go' | xargs goimports -w -local github.com/tendermint/tendermint +.PHONY: format lint: @echo "--> Running linter" diff --git a/README.md b/README.md index c9979aceb..835cf7a22 100644 --- a/README.md +++ b/README.md @@ -64,17 +64,16 @@ See the [install instructions](/docs/introduction/install.md) ## Contributing -Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions, -and the [contributing guidelines](CONTRIBUTING.md) when submitting code. +Please abide by the [Code of Conduct](CODE_OF_CONDUCT.md) in all interactions. -Join the larger community on the [forum](https://forum.cosmos.network/) and the [chat](https://riot.im/app/#/room/#tendermint:matrix.org). +Before contributing to the project, please take a look at the [contributing guidelines](CONTRIBUTING.md) +and the [style guide](STYLE_GUIDE.md). 
-To learn more about the structure of the software, watch the [Developer -Sessions](/docs/DEV_SESSIONS.md) and read some [Architectural Decision -Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). +To get more active, Join the wider community at [Discord](https://discord.gg/AzefAFd) or jump onto the [Forum](https://forum.cosmos.network/). -Learn more by reading the code and comparing it to the -[specification](https://github.com/tendermint/spec). +Learn more by reading the code and the +[specifications](https://github.com/tendermint/spec) or watch the [Developer Sessions](/docs/DEV_SESSIONS.md) and read up on the +[Architectural Decision Records](https://github.com/tendermint/tendermint/tree/master/docs/architecture). ## Versioning @@ -118,7 +117,14 @@ data into the new chain. However, any bump in the PATCH version should be compatible with existing histories (if not please open an [issue](https://github.com/tendermint/tendermint/issues)). -For more information on upgrading, see [UPGRADING.md](./UPGRADING.md) +For more information on upgrading, see [UPGRADING.md](./UPGRADING.md). + +### Supported Versions + +Because we are a small core team, we only ship patch updates, including security updates, +to the most recent minor release and the second-most recent minor release. Consequently, +we strongly recommend keeping Tendermint up-to-date. Upgrading instructions can be found +in [UPGRADING.md](./UPGRADING.md). ## Resources @@ -133,7 +139,7 @@ hosted at: https://docs.tendermint.com/master/ ### Tools Benchmarking is provided by `tm-load-test`. -The code for `tm-load-test` can be found [here](https://github.com/interchainio/tm-load-test) this binary needs to be built separately. +The code for `tm-load-test` can be found [here](https://github.com/informalsystems/tm-load-test) this binary needs to be built separately. Additional documentation is found [here](/docs/tools). 
### Sub-projects diff --git a/STYLE_GUIDE.md b/STYLE_GUIDE.md new file mode 100644 index 000000000..6d0deda6d --- /dev/null +++ b/STYLE_GUIDE.md @@ -0,0 +1,159 @@ +# Go Coding Style Guide + +In order to keep our code looking good with lots of programmers working on it, it helps to have a "style guide", so all +the code generally looks quite similar. This doesn't mean there is only one "right way" to write code, or even that this +standard is better than your style. But if we agree to a number of stylistic practices, it makes it much easier to read +and modify new code. Please feel free to make suggestions if there's something you would like to add or modify. + +We expect all contributors to be familiar with [Effective Go](https://golang.org/doc/effective_go.html) +(and it's recommended reading for all Go programmers anyways). Additionally, we generally agree with the suggestions + in [Uber's style guide](https://github.com/uber-go/guide/blob/master/style.md) and use that as a starting point. + + +## Code Structure + +Perhaps more key for code readability than good commenting is having the right structure. As a rule of thumb, try to write +in a logical order of importance, taking a little time to think how to order and divide the code such that someone could +scroll down and understand the functionality of it just as well as you do. A loose example of such order would be: +* Constants, global and package-level variables +* Main Struct +* Options (only if they are seen as critical to the struct else they should be placed in another file) +* Initialization / Start and stop of the service +* Msgs/Events +* Public Functions (In order of most important) +* Private/helper functions +* Auxiliary structs and function (can also be above private functions or in a separate file) + +## General + + * Use `gofmt` (or `goimport`) to format all code upon saving it. (If you use VIM, check out vim-go). 
+ * Use a linter (see below) and generally try to keep the linter happy (where it makes sense). + * Think about documentation, and try to leave godoc comments, when it will help new developers. + * Every package should have a high level doc.go file to describe the purpose of that package, its main functions, and any other relevant information. + * `TODO` should not be used. If important enough should be recorded as an issue. + * `BUG` / `FIXME` should be used sparingly to guide future developers on some of the vulnerabilities of the code. + * `XXX` can be used in work-in-progress (prefixed with "WIP:" on github) branches but they must be removed before approving a PR. + * Applications (e.g. clis/servers) *should* panic on unexpected unrecoverable errors and print a stack trace. + +## Comments + + * Use a space after comment deliminter (ex. `// your comment`). + * Many comments are not sentences. These should begin with a lower case letter and end without a period. + * Conversely, sentences in comments should be sentenced-cased and end with a period. + +## Linters + +These must be applied to all (Go) repos. + + * [shellcheck](https://github.com/koalaman/shellcheck) + * [golangci-lint](https://github.com/golangci/golangci-lint) (covers all important linters) + - See the `.golangci.yml` file in each repo for linter configuration. + +## Various + + * Reserve "Save" and "Load" for long-running persistence operations. When parsing bytes, use "Encode" or "Decode". + * Maintain consistency across the codebase. + * Functions that return functions should have the suffix `Fn` + * Names should not [stutter](https://blog.golang.org/package-names). For example, a struct generally shouldn’t have + a field named after itself; e.g., this shouldn't occur: +``` golang +type middleware struct { + middleware Middleware +} +``` + * In comments, use "iff" to mean, "if and only if". 
+ * Product names are capitalized, like "Tendermint", "Basecoin", "Protobuf", etc except in command lines: `tendermint --help` + * Acronyms are all capitalized, like "RPC", "gRPC", "API". "MyID", rather than "MyId". + * Prefer errors.New() instead of fmt.Errorf() unless you're actually using the format feature with arguments. + +## Importing Libraries + +Sometimes it's necessary to rename libraries to avoid naming collisions or ambiguity. + + * Use [goimports](https://godoc.org/golang.org/x/tools/cmd/goimports) + * Separate imports into blocks - one for the standard lib, one for external libs and one for application libs. + * Here are some common library labels for consistency: + - dbm "github.com/tendermint/tm-db" + - tmcmd "github.com/tendermint/tendermint/cmd/tendermint/commands" + - tmcfg "github.com/tendermint/tendermint/config/tendermint" + - tmtypes "github.com/tendermint/tendermint/types" + * Never use anonymous imports (the `.`), for example, `tmlibs/common` or anything else. + * When importing a pkg from the `tendermint/libs` directory, prefix the pkg alias with tm. + - tmbits "github.com/tendermint/tendermint/libs/bits" + * tip: Use the `_` library import to import a library for initialization effects (side effects) + +## Dependencies + + * Dependencies should be pinned by a release tag, or specific commit, to avoid breaking `go get` when external dependencies are updated. 
+ * Refer to the [contributing](CONTRIBUTING.md) document for more details + +## Testing + + * The first rule of testing is: we add tests to our code + * The second rule of testing is: we add tests to our code + * For Golang testing: + * Make use of table driven testing where possible and not-cumbersome + - [Inspiration](https://dave.cheney.net/2013/06/09/writing-table-driven-tests-in-go) + * Make use of [assert](https://godoc.org/github.com/stretchr/testify/assert) and [require](https://godoc.org/github.com/stretchr/testify/require) + * When using mocks, it is recommended to use Testify [mock] (https://pkg.go.dev/github.com/stretchr/testify/mock + ) along with [Mockery](https://github.com/vektra/mockery) for autogeneration + +## Errors + + * Ensure that errors are concise, clear and traceable. + * Use stdlib errors package. + * For wrapping errors, use `fmt.Errorf()` with `%w`. + * Panic is appropriate when an internal invariant of a system is broken, while all other cases (in particular, + incorrect or invalid usage) should return errors. + +## Config + + * Currently the TOML filetype is being used for config files + * A good practice is to store per-user config files under `~/.[yourAppName]/config.toml` + +## CLI + + * When implementing a CLI use [Cobra](https://github.com/spf13/cobra) and [Viper](https://github.com/spf13/viper). + * Helper messages for commands and flags must be all lowercase. + * Instead of using pointer flags (eg. `FlagSet().StringVar`) use Viper to retrieve flag values (eg. `viper.GetString`) + - The flag key used when setting and getting the flag should always be stored in a + variable taking the form `FlagXxx` or `flagXxx`. + - Flag short variable descriptions should always start with a lower case character as to remain consistent with + the description provided in the default `--help` flag. 
+ +## Version + + * Every repo should have a version/version.go file that mimics the Tendermint Core repo + * We read the value of the constant version in our build scripts and hence it has to be a string + +## Non-Go Code + + * All non-Go code (`*.proto`, `Makefile`, `*.sh`), where there is no common + agreement on style, should be formatted according to + [EditorConfig](http://editorconfig.org/) config: + + ``` + # top-most EditorConfig file + root = true + + # Unix-style newlines with a newline ending every file + [*] + charset = utf-8 + end_of_line = lf + insert_final_newline = true + trim_trailing_whitespace = true + + [Makefile] + indent_style = tab + + [*.sh] + indent_style = tab + + [*.proto] + indent_style = space + indent_size = 2 + ``` + + Make sure the file above (`.editorconfig`) are in the root directory of your + repo and you have a [plugin for your + editor](http://editorconfig.org/#download) installed. diff --git a/UPGRADING.md b/UPGRADING.md index e395b9fb1..5410186af 100644 --- a/UPGRADING.md +++ b/UPGRADING.md @@ -7,6 +7,17 @@ a newer version of Tendermint Core. + +## v0.33.4 + +### Go API + +- `rpc/client` HTTP and local clients have been moved into `http` and `local` subpackages, and their constructors have been renamed to `New()`. + +### Protobuf Changes + +When upgrading to version 0.33.4 you will have to fetch the `third_party` directory along with the updated proto files. + ## v0.33.1 This release is compatible with the previous version. The only change that is required is if you are fetching the protobuf files for application use. @@ -15,6 +26,7 @@ This release is compatible with the previous version. The only change that is re When upgrading to version 0.33.1 you will have to fetch the `third_party` directory along with the updated proto files. + ## v0.33.0 This release is not compatible with previous blockchains due to commit becoming signatures only and fields in the header have been removed. 
diff --git a/abci/example/kvstore/kvstore.go b/abci/example/kvstore/kvstore.go index 4e7449938..42f00231f 100644 --- a/abci/example/kvstore/kvstore.go +++ b/abci/example/kvstore/kvstore.go @@ -6,11 +6,12 @@ import ( "encoding/json" "fmt" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) var ( @@ -63,7 +64,8 @@ var _ types.Application = (*Application)(nil) type Application struct { types.BaseApplication - state State + state State + RetainBlocks int64 // blocks to retain after commit (via ResponseCommit.RetainHeight) } func NewApplication() *Application { @@ -118,7 +120,12 @@ func (app *Application) Commit() types.ResponseCommit { app.state.AppHash = appHash app.state.Height++ saveState(app.state) - return types.ResponseCommit{Data: appHash} + + resp := types.ResponseCommit{Data: appHash} + if app.RetainBlocks > 0 && app.state.Height >= app.RetainBlocks { + resp.RetainHeight = app.state.Height - app.RetainBlocks + 1 + } + return resp } // Returns an associated value or nil if missing. 
diff --git a/abci/example/kvstore/persistent_kvstore.go b/abci/example/kvstore/persistent_kvstore.go index 0c5498bee..fffc617be 100644 --- a/abci/example/kvstore/persistent_kvstore.go +++ b/abci/example/kvstore/persistent_kvstore.go @@ -7,12 +7,13 @@ import ( "strconv" "strings" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/libs/log" tmtypes "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( diff --git a/abci/types/types.pb.go b/abci/types/types.pb.go index 6c18cfbd1..51ff4aedd 100644 --- a/abci/types/types.pb.go +++ b/abci/types/types.pb.go @@ -1865,6 +1865,7 @@ func (m *ResponseEndBlock) GetEvents() []Event { type ResponseCommit struct { // reserve 1 Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + RetainHeight int64 `protobuf:"varint,3,opt,name=retain_height,json=retainHeight,proto3" json:"retain_height,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1910,6 +1911,13 @@ func (m *ResponseCommit) GetData() []byte { return nil } +func (m *ResponseCommit) GetRetainHeight() int64 { + if m != nil { + return m.RetainHeight + } + return 0 +} + // ConsensusParams contains all consensus-relevant parameters // that can be adjusted by the abci app type ConsensusParams struct { @@ -2960,155 +2968,156 @@ func init() { proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa func init() { golang_proto.RegisterFile("abci/types/types.proto", fileDescriptor_9f1eaa49c51fa1ac) } var fileDescriptor_9f1eaa49c51fa1ac = []byte{ - // 2370 bytes of a gzipped FileDescriptorProto + // 2386 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe4, 0x59, 0x4d, 0x90, 0x1b, 0x47, - 0x15, 0xde, 0xd1, 0x6a, 
0x57, 0xd2, 0xd3, 0xee, 0x4a, 0x69, 0x3b, 0x89, 0x22, 0x92, 0x5d, 0xd7, - 0xf8, 0x6f, 0x9d, 0x04, 0x6d, 0x58, 0x2a, 0x54, 0x8c, 0x5d, 0xa1, 0x56, 0x6b, 0x07, 0xa9, 0x62, - 0x3b, 0x9b, 0xb1, 0xbd, 0x18, 0xa8, 0xca, 0x54, 0x4b, 0xd3, 0x96, 0xa6, 0x56, 0x9a, 0x99, 0xcc, - 0xb4, 0x64, 0x89, 0xe2, 0x4e, 0x51, 0xc5, 0x81, 0x0b, 0x55, 0x5c, 0xb8, 0x73, 0xe4, 0xc0, 0x21, - 0x47, 0x8e, 0x39, 0x70, 0xe0, 0xc0, 0xd9, 0xc0, 0xc2, 0x89, 0xca, 0x91, 0xa2, 0x38, 0x52, 0xfd, - 0xba, 0xe7, 0x4f, 0x2b, 0xad, 0xc6, 0xc1, 0x37, 0x2e, 0xd2, 0x74, 0xf7, 0x7b, 0xaf, 0xbb, 0x5f, - 0xbf, 0x7e, 0xdf, 0x7b, 0xaf, 0xe1, 0x35, 0xda, 0xe9, 0xda, 0x7b, 0x7c, 0xea, 0xb1, 0x40, 0xfe, - 0x36, 0x3c, 0xdf, 0xe5, 0x2e, 0x79, 0x95, 0x33, 0xc7, 0x62, 0xfe, 0xd0, 0x76, 0x78, 0x43, 0x90, - 0x34, 0x70, 0xb0, 0x7e, 0x8d, 0xf7, 0x6d, 0xdf, 0x32, 0x3d, 0xea, 0xf3, 0xe9, 0x1e, 0x52, 0xee, - 0xf5, 0xdc, 0x9e, 0x1b, 0x7f, 0x49, 0xf6, 0x7a, 0xbd, 0xeb, 0x4f, 0x3d, 0xee, 0xee, 0x0d, 0x99, - 0x7f, 0x32, 0x60, 0xea, 0x4f, 0x8d, 0x5d, 0x18, 0xd8, 0x9d, 0x60, 0xef, 0x64, 0x9c, 0x9c, 0xaf, - 0xbe, 0xd3, 0x73, 0xdd, 0xde, 0x80, 0x49, 0x99, 0x9d, 0xd1, 0xd3, 0x3d, 0x6e, 0x0f, 0x59, 0xc0, - 0xe9, 0xd0, 0x53, 0x04, 0xdb, 0xb3, 0x04, 0xd6, 0xc8, 0xa7, 0xdc, 0x76, 0x1d, 0x39, 0xae, 0xff, - 0x7b, 0x0d, 0x0a, 0x06, 0xfb, 0x7c, 0xc4, 0x02, 0x4e, 0x3e, 0x80, 0x3c, 0xeb, 0xf6, 0xdd, 0x5a, - 0xee, 0x92, 0xb6, 0x5b, 0xde, 0xd7, 0x1b, 0x73, 0xf7, 0xd2, 0x50, 0xd4, 0x77, 0xbb, 0x7d, 0xb7, - 0xb5, 0x62, 0x20, 0x07, 0xb9, 0x05, 0x6b, 0x4f, 0x07, 0xa3, 0xa0, 0x5f, 0x5b, 0x45, 0xd6, 0xcb, - 0xe7, 0xb3, 0x7e, 0x24, 0x48, 0x5b, 0x2b, 0x86, 0xe4, 0x11, 0xd3, 0xda, 0xce, 0x53, 0xb7, 0x96, - 0xcf, 0x32, 0x6d, 0xdb, 0x79, 0x8a, 0xd3, 0x0a, 0x0e, 0xd2, 0x02, 0x08, 0x18, 0x37, 0x5d, 0x4f, - 0x6c, 0xa8, 0xb6, 0x86, 0xfc, 0xd7, 0xcf, 0xe7, 0x7f, 0xc8, 0xf8, 0x27, 0x48, 0xde, 0x5a, 0x31, - 0x4a, 0x41, 0xd8, 0x10, 0x92, 0x6c, 0xc7, 0xe6, 0x66, 0xb7, 0x4f, 0x6d, 0xa7, 0xb6, 0x9e, 0x45, - 0x52, 0xdb, 0xb1, 0xf9, 0xa1, 0x20, 0x17, 0x92, 0xec, 0xb0, 
0x21, 0x54, 0xf1, 0xf9, 0x88, 0xf9, - 0xd3, 0x5a, 0x21, 0x8b, 0x2a, 0x3e, 0x15, 0xa4, 0x42, 0x15, 0xc8, 0x43, 0x3e, 0x86, 0x72, 0x87, - 0xf5, 0x6c, 0xc7, 0xec, 0x0c, 0xdc, 0xee, 0x49, 0xad, 0x88, 0x22, 0x76, 0xcf, 0x17, 0xd1, 0x14, - 0x0c, 0x4d, 0x41, 0xdf, 0x5a, 0x31, 0xa0, 0x13, 0xb5, 0x48, 0x13, 0x8a, 0xdd, 0x3e, 0xeb, 0x9e, - 0x98, 0x7c, 0x52, 0x2b, 0xa1, 0xa4, 0xab, 0xe7, 0x4b, 0x3a, 0x14, 0xd4, 0x8f, 0x26, 0xad, 0x15, - 0xa3, 0xd0, 0x95, 0x9f, 0x42, 0x2f, 0x16, 0x1b, 0xd8, 0x63, 0xe6, 0x0b, 0x29, 0x17, 0xb2, 0xe8, - 0xe5, 0x8e, 0xa4, 0x47, 0x39, 0x25, 0x2b, 0x6c, 0x90, 0xbb, 0x50, 0x62, 0x8e, 0xa5, 0x36, 0x56, - 0x46, 0x41, 0xd7, 0x96, 0x58, 0x98, 0x63, 0x85, 0xdb, 0x2a, 0x32, 0xf5, 0x4d, 0x3e, 0x84, 0xf5, - 0xae, 0x3b, 0x1c, 0xda, 0xbc, 0xb6, 0x81, 0x32, 0xae, 0x2c, 0xd9, 0x12, 0xd2, 0xb6, 0x56, 0x0c, - 0xc5, 0xd5, 0x2c, 0xc0, 0xda, 0x98, 0x0e, 0x46, 0x4c, 0xbf, 0x0e, 0xe5, 0x84, 0x25, 0x93, 0x1a, - 0x14, 0x86, 0x2c, 0x08, 0x68, 0x8f, 0xd5, 0xb4, 0x4b, 0xda, 0x6e, 0xc9, 0x08, 0x9b, 0xfa, 0x16, - 0x6c, 0x24, 0xed, 0x56, 0x1f, 0x46, 0x8c, 0xc2, 0x16, 0x05, 0xe3, 0x98, 0xf9, 0x81, 0x30, 0x40, - 0xc5, 0xa8, 0x9a, 0xe4, 0x32, 0x6c, 0xe2, 0x6e, 0xcd, 0x70, 0x5c, 0xdc, 0xab, 0xbc, 0xb1, 0x81, - 0x9d, 0xc7, 0x8a, 0x68, 0x07, 0xca, 0xde, 0xbe, 0x17, 0x91, 0xac, 0x22, 0x09, 0x78, 0xfb, 0x9e, - 0x22, 0xd0, 0xbf, 0x0b, 0xd5, 0x59, 0xd3, 0x25, 0x55, 0x58, 0x3d, 0x61, 0x53, 0x35, 0x9f, 0xf8, - 0x24, 0x17, 0xd5, 0xb6, 0x70, 0x8e, 0x92, 0xa1, 0xf6, 0xf8, 0xbb, 0x5c, 0xc4, 0x1c, 0x59, 0xab, - 0xb8, 0x6e, 0xc2, 0x49, 0x20, 0x77, 0x79, 0xbf, 0xde, 0x90, 0x0e, 0xa2, 0x11, 0x3a, 0x88, 0xc6, - 0xa3, 0xd0, 0x83, 0x34, 0x8b, 0x5f, 0x3e, 0xdf, 0x59, 0xf9, 0xe5, 0x5f, 0x76, 0x34, 0x03, 0x39, - 0xc8, 0x1b, 0xc2, 0xa0, 0xa8, 0xed, 0x98, 0xb6, 0xa5, 0xe6, 0x29, 0x60, 0xbb, 0x6d, 0x91, 0x4f, - 0xa1, 0xda, 0x75, 0x9d, 0x80, 0x39, 0xc1, 0x28, 0x10, 0x6e, 0x8e, 0x0e, 0x03, 0xe5, 0x0b, 0x16, - 0x1d, 0xf2, 0x61, 0x48, 0x7e, 0x84, 0xd4, 0x46, 0xa5, 0x9b, 0xee, 0x20, 0xf7, 0x00, 0xc6, 0x74, - 
0x60, 0x5b, 0x94, 0xbb, 0x7e, 0x50, 0xcb, 0x5f, 0x5a, 0x3d, 0x47, 0xd8, 0x71, 0x48, 0xf8, 0xd8, - 0xb3, 0x28, 0x67, 0xcd, 0xbc, 0x58, 0xb9, 0x91, 0xe0, 0x27, 0xd7, 0xa0, 0x42, 0x3d, 0xcf, 0x0c, - 0x38, 0xe5, 0xcc, 0xec, 0x4c, 0x39, 0x0b, 0xd0, 0x5f, 0x6c, 0x18, 0x9b, 0xd4, 0xf3, 0x1e, 0x8a, - 0xde, 0xa6, 0xe8, 0xd4, 0xad, 0xe8, 0xb4, 0xf1, 0x6a, 0x12, 0x02, 0x79, 0x8b, 0x72, 0x8a, 0xda, - 0xda, 0x30, 0xf0, 0x5b, 0xf4, 0x79, 0x94, 0xf7, 0x95, 0x0e, 0xf0, 0x9b, 0xbc, 0x06, 0xeb, 0x7d, - 0x66, 0xf7, 0xfa, 0x1c, 0xb7, 0xbd, 0x6a, 0xa8, 0x96, 0x38, 0x18, 0xcf, 0x77, 0xc7, 0x0c, 0xbd, - 0x5b, 0xd1, 0x90, 0x0d, 0xfd, 0x57, 0x39, 0x78, 0xe5, 0xcc, 0xf5, 0x15, 0x72, 0xfb, 0x34, 0xe8, - 0x87, 0x73, 0x89, 0x6f, 0x72, 0x4b, 0xc8, 0xa5, 0x16, 0xf3, 0x95, 0x57, 0x7e, 0x6b, 0x81, 0x06, - 0x5a, 0x48, 0xa4, 0x36, 0xae, 0x58, 0xc8, 0x63, 0xa8, 0x0e, 0x68, 0xc0, 0x4d, 0x69, 0xfb, 0x26, - 0x7a, 0xd9, 0xd5, 0x73, 0x3d, 0xc1, 0x3d, 0x1a, 0xde, 0x19, 0x61, 0xdc, 0x4a, 0xdc, 0xd6, 0x20, - 0xd5, 0x4b, 0x9e, 0xc0, 0xc5, 0xce, 0xf4, 0x27, 0xd4, 0xe1, 0xb6, 0xc3, 0xcc, 0x33, 0x67, 0xb4, - 0xb3, 0x40, 0xf4, 0xdd, 0xb1, 0x6d, 0x31, 0xa7, 0x1b, 0x1e, 0xce, 0x85, 0x48, 0x44, 0x74, 0x78, - 0x81, 0xfe, 0x04, 0xb6, 0xd2, 0xbe, 0x88, 0x6c, 0x41, 0x8e, 0x4f, 0x94, 0x46, 0x72, 0x7c, 0x42, - 0xbe, 0x03, 0x79, 0x21, 0x0e, 0xb5, 0xb1, 0xb5, 0x10, 0x2c, 0x14, 0xf7, 0xa3, 0xa9, 0xc7, 0x0c, - 0xa4, 0xd7, 0xf5, 0xe8, 0x26, 0x44, 0xfe, 0x69, 0x56, 0xb6, 0x7e, 0x03, 0x2a, 0x33, 0xae, 0x27, - 0x71, 0xac, 0x5a, 0xf2, 0x58, 0xf5, 0x0a, 0x6c, 0xa6, 0x3c, 0x8c, 0xfe, 0xc7, 0x75, 0x28, 0x1a, - 0x2c, 0xf0, 0x84, 0x11, 0x93, 0x16, 0x94, 0xd8, 0xa4, 0xcb, 0x24, 0x2c, 0x69, 0x4b, 0x9c, 0xb8, - 0xe4, 0xb9, 0x1b, 0xd2, 0x0b, 0xaf, 0x19, 0x31, 0x93, 0x9b, 0x29, 0x48, 0xbe, 0xbc, 0x4c, 0x48, - 0x12, 0x93, 0x6f, 0xa7, 0x31, 0xf9, 0xca, 0x12, 0xde, 0x19, 0x50, 0xbe, 0x99, 0x02, 0xe5, 0x65, - 0x13, 0xa7, 0x50, 0xb9, 0x3d, 0x07, 0x95, 0x97, 0x6d, 0x7f, 0x01, 0x2c, 0xb7, 0xe7, 0xc0, 0xf2, - 0xee, 0xd2, 0xb5, 0xcc, 0xc5, 0xe5, 
0xdb, 0x69, 0x5c, 0x5e, 0xa6, 0x8e, 0x19, 0x60, 0xbe, 0x37, - 0x0f, 0x98, 0x6f, 0x2c, 0x91, 0xb1, 0x10, 0x99, 0x0f, 0xcf, 0x20, 0xf3, 0xb5, 0x25, 0xa2, 0xe6, - 0x40, 0x73, 0x3b, 0x05, 0xcd, 0x90, 0x49, 0x37, 0x0b, 0xb0, 0xf9, 0xa3, 0xb3, 0xd8, 0x7c, 0x7d, - 0x99, 0xa9, 0xcd, 0x03, 0xe7, 0xef, 0xcd, 0x80, 0xf3, 0xd5, 0x65, 0xbb, 0x5a, 0x88, 0xce, 0x37, - 0x84, 0x7f, 0x9c, 0xb9, 0x19, 0xc2, 0x97, 0x32, 0xdf, 0x77, 0x7d, 0x05, 0x7c, 0xb2, 0xa1, 0xef, - 0x0a, 0x8f, 0x1d, 0xdb, 0xff, 0x39, 0x48, 0x8e, 0x97, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, - 0x8b, 0x9e, 0x2d, 0xe9, 0xed, 0x4b, 0xca, 0xdb, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x1d, 0x28, - 0x0b, 0x4c, 0x99, 0xc1, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x57, 0xd0, 0xff, 0xca, 0x30, - 0x40, 0x39, 0x92, 0x3c, 0x3a, 0x92, 0x8a, 0x18, 0x90, 0x1a, 0x94, 0x40, 0xf1, 0x4d, 0xb8, 0x90, - 0xa0, 0x15, 0x72, 0x11, 0x0b, 0x24, 0x48, 0x55, 0x23, 0xea, 0x03, 0xcf, 0x6b, 0xd1, 0xa0, 0xaf, - 0xdf, 0x8f, 0x15, 0x14, 0xc7, 0x05, 0x04, 0xf2, 0x5d, 0xd7, 0x92, 0xfb, 0xde, 0x34, 0xf0, 0x5b, - 0xc4, 0x0a, 0x03, 0xb7, 0x87, 0x8b, 0x2b, 0x19, 0xe2, 0x53, 0x50, 0x45, 0x57, 0xbb, 0x24, 0xef, - 0xac, 0xfe, 0x7b, 0x2d, 0x96, 0x17, 0x87, 0x0a, 0xf3, 0x50, 0x5d, 0x7b, 0x99, 0xa8, 0x9e, 0xfb, - 0xdf, 0x50, 0x5d, 0xff, 0x97, 0x16, 0x1f, 0x69, 0x84, 0xd7, 0x5f, 0x4f, 0x05, 0xc2, 0xba, 0x6c, - 0xc7, 0x62, 0x13, 0x54, 0xf9, 0xaa, 0x21, 0x1b, 0x61, 0xa8, 0xb5, 0x8e, 0xc7, 0x90, 0x0e, 0xb5, - 0x0a, 0xd8, 0x27, 0x1b, 0xe4, 0x7d, 0xc4, 0x79, 0xf7, 0xa9, 0x72, 0x0d, 0x29, 0x10, 0x94, 0x49, - 0x5d, 0x43, 0x65, 0x73, 0x47, 0x82, 0xcc, 0x90, 0xd4, 0x09, 0x7c, 0x29, 0xa5, 0xc2, 0x86, 0x37, - 0xa1, 0x24, 0x96, 0x1e, 0x78, 0xb4, 0xcb, 0xf0, 0x6e, 0x97, 0x8c, 0xb8, 0x43, 0xb7, 0x80, 0x9c, - 0xf5, 0x31, 0xe4, 0x01, 0xac, 0xb3, 0x31, 0x73, 0xb8, 0x38, 0x23, 0xa1, 0xd6, 0x37, 0x17, 0x02, - 0x31, 0x73, 0x78, 0xb3, 0x26, 0x94, 0xf9, 0xcf, 0xe7, 0x3b, 0x55, 0xc9, 0xf3, 0xae, 0x3b, 0xb4, - 0x39, 0x1b, 0x7a, 0x7c, 0x6a, 0x28, 0x29, 0xfa, 0xcf, 0x72, 0x02, 0x0f, 
0x53, 0xfe, 0x67, 0xae, - 0x7a, 0xc3, 0x4b, 0x93, 0x4b, 0x84, 0x48, 0xd9, 0x54, 0xfe, 0x16, 0x40, 0x8f, 0x06, 0xe6, 0x33, - 0xea, 0x70, 0x66, 0x29, 0xbd, 0x97, 0x7a, 0x34, 0xf8, 0x01, 0x76, 0x88, 0x78, 0x53, 0x0c, 0x8f, - 0x02, 0x66, 0xe1, 0x01, 0xac, 0x1a, 0x85, 0x1e, 0x0d, 0x1e, 0x07, 0xcc, 0x4a, 0xec, 0xb5, 0xf0, - 0x32, 0xf6, 0x9a, 0xd6, 0x77, 0x71, 0x56, 0xdf, 0x3f, 0xcf, 0xc5, 0xb7, 0x23, 0x0e, 0x1f, 0xfe, - 0x3f, 0x75, 0xf1, 0x1b, 0xcc, 0x29, 0xd2, 0x20, 0x40, 0x7e, 0x08, 0xaf, 0x44, 0xb7, 0xd2, 0x1c, - 0xe1, 0x6d, 0x0d, 0xad, 0xf0, 0xc5, 0x2e, 0x77, 0x75, 0x9c, 0xee, 0x0e, 0xc8, 0x67, 0xf0, 0xfa, - 0x8c, 0x0f, 0x8a, 0x26, 0xc8, 0xbd, 0x90, 0x2b, 0x7a, 0x35, 0xed, 0x8a, 0x42, 0xf9, 0xb1, 0xf6, - 0x56, 0x5f, 0xca, 0xad, 0xb9, 0x22, 0x42, 0xd8, 0x24, 0xbc, 0xcd, 0xb3, 0x09, 0xfd, 0xcf, 0x1a, - 0x54, 0x66, 0x16, 0x48, 0x3e, 0x80, 0x35, 0x89, 0xc0, 0xda, 0xb9, 0x85, 0x10, 0xd4, 0xb8, 0xda, - 0x93, 0x64, 0x20, 0x07, 0x50, 0x64, 0x2a, 0xba, 0x56, 0x4a, 0xb9, 0xba, 0x24, 0x08, 0x57, 0xfc, - 0x11, 0x1b, 0xb9, 0x03, 0xa5, 0x48, 0xf5, 0x4b, 0x32, 0xb7, 0xe8, 0xe4, 0x94, 0x90, 0x98, 0x51, - 0x3f, 0x84, 0x72, 0x62, 0x79, 0xe4, 0x1b, 0x50, 0x1a, 0xd2, 0x89, 0x4a, 0xb7, 0x64, 0x00, 0x5d, - 0x1c, 0xd2, 0x09, 0x66, 0x5a, 0xe4, 0x75, 0x28, 0x88, 0xc1, 0x1e, 0x95, 0x07, 0xb9, 0x6a, 0xac, - 0x0f, 0xe9, 0xe4, 0xfb, 0x34, 0xd0, 0x7f, 0xa1, 0xc1, 0x56, 0x7a, 0x9d, 0xe4, 0x1d, 0x20, 0x82, - 0x96, 0xf6, 0x98, 0xe9, 0x8c, 0x86, 0x12, 0x23, 0x43, 0x89, 0x95, 0x21, 0x9d, 0x1c, 0xf4, 0xd8, - 0x83, 0xd1, 0x10, 0xa7, 0x0e, 0xc8, 0x7d, 0xa8, 0x86, 0xc4, 0x61, 0xb1, 0x4b, 0x69, 0xe5, 0x8d, - 0x33, 0xc9, 0xee, 0x1d, 0x45, 0x20, 0x73, 0xdd, 0x5f, 0x8b, 0x5c, 0x77, 0x4b, 0xca, 0x0b, 0x47, - 0xf4, 0xf7, 0xa1, 0x32, 0xb3, 0x63, 0xa2, 0xc3, 0xa6, 0x37, 0xea, 0x98, 0x27, 0x6c, 0x6a, 0xa2, - 0x4a, 0xd0, 0xd4, 0x4b, 0x46, 0xd9, 0x1b, 0x75, 0x3e, 0x66, 0x53, 0x91, 0x75, 0x04, 0x7a, 0x17, - 0xb6, 0xd2, 0xc9, 0x94, 0x00, 0x0e, 0xdf, 0x1d, 0x39, 0x16, 0xae, 0x7b, 0xcd, 0x90, 0x0d, 0x72, - 0x0b, 0xd6, 
0xc6, 0xae, 0xb4, 0xe6, 0xf3, 0xb2, 0xa7, 0x63, 0x97, 0xb3, 0x44, 0x4a, 0x26, 0x79, - 0xf4, 0x00, 0xd6, 0xd0, 0x2e, 0x85, 0x8d, 0x61, 0x5a, 0xa4, 0x02, 0x17, 0xf1, 0x4d, 0x8e, 0x01, - 0x28, 0xe7, 0xbe, 0xdd, 0x19, 0xc5, 0xe2, 0x6b, 0x49, 0xf1, 0x03, 0xbb, 0x13, 0x34, 0x4e, 0xc6, - 0x8d, 0x23, 0x6a, 0xfb, 0xcd, 0x37, 0x95, 0x65, 0x5f, 0x8c, 0x79, 0x12, 0xd6, 0x9d, 0x90, 0xa4, - 0x7f, 0x95, 0x87, 0x75, 0x99, 0x6e, 0x92, 0x0f, 0xd3, 0xc5, 0x8f, 0xf2, 0xfe, 0xf6, 0xa2, 0xe5, - 0x4b, 0x2a, 0xb5, 0xfa, 0x28, 0x82, 0xba, 0x36, 0x5b, 0x51, 0x68, 0x96, 0x4f, 0x9f, 0xef, 0x14, - 0x30, 0xfa, 0x68, 0xdf, 0x89, 0xcb, 0x0b, 0x8b, 0xb2, 0xeb, 0xb0, 0x96, 0x91, 0x7f, 0xe1, 0x5a, - 0x46, 0x0b, 0x36, 0x13, 0xe1, 0x96, 0x6d, 0xa9, 0x3c, 0x65, 0xfb, 0xbc, 0x4b, 0xd7, 0xbe, 0xa3, - 0xd6, 0x5f, 0x8e, 0xc2, 0xb1, 0xb6, 0x45, 0x76, 0xd3, 0x49, 0x36, 0x46, 0x6d, 0x32, 0x5c, 0x48, - 0xe4, 0xcd, 0x22, 0x66, 0x13, 0xd7, 0x41, 0x5c, 0x7e, 0x49, 0x22, 0xa3, 0x87, 0xa2, 0xe8, 0xc0, - 0xc1, 0xeb, 0x50, 0x89, 0x03, 0x1b, 0x49, 0x52, 0x94, 0x52, 0xe2, 0x6e, 0x24, 0x7c, 0x0f, 0x2e, - 0x3a, 0x6c, 0xc2, 0xcd, 0x59, 0xea, 0x12, 0x52, 0x13, 0x31, 0x76, 0x9c, 0xe6, 0xb8, 0x0a, 0x5b, - 0xb1, 0x0b, 0x45, 0x5a, 0x90, 0xa5, 0x8f, 0xa8, 0x17, 0xc9, 0xde, 0x80, 0x62, 0x14, 0x76, 0x96, - 0x91, 0xa0, 0x40, 0x65, 0xb4, 0x19, 0x05, 0xb2, 0x3e, 0x0b, 0x46, 0x03, 0xae, 0x84, 0x6c, 0x20, - 0x0d, 0x06, 0xb2, 0x86, 0xec, 0x47, 0xda, 0xcb, 0xb0, 0x19, 0x7a, 0x15, 0x49, 0xb7, 0x89, 0x74, - 0x1b, 0x61, 0x27, 0x12, 0xdd, 0x80, 0xaa, 0xe7, 0xbb, 0x9e, 0x1b, 0x30, 0xdf, 0xa4, 0x96, 0xe5, - 0xb3, 0x20, 0xa8, 0x6d, 0x49, 0x79, 0x61, 0xff, 0x81, 0xec, 0xd6, 0xbf, 0x05, 0x85, 0x30, 0x9e, - 0xbe, 0x08, 0x6b, 0xcd, 0xc8, 0x43, 0xe6, 0x0d, 0xd9, 0x10, 0xf8, 0x7a, 0xe0, 0x79, 0xaa, 0xba, - 0x26, 0x3e, 0xf5, 0x01, 0x14, 0xd4, 0x81, 0xcd, 0xad, 0xa9, 0xdc, 0x87, 0x0d, 0x8f, 0xfa, 0x62, - 0x1b, 0xc9, 0xca, 0xca, 0xa2, 0x8c, 0xf0, 0x88, 0xfa, 0xfc, 0x21, 0xe3, 0xa9, 0x02, 0x4b, 0x19, - 0xf9, 0x65, 0x97, 0x7e, 0x13, 0x36, 0x53, 0x34, 
0x62, 0x99, 0xdc, 0xe5, 0x74, 0x10, 0x5e, 0x74, - 0x6c, 0x44, 0x2b, 0xc9, 0xc5, 0x2b, 0xd1, 0x6f, 0x41, 0x29, 0x3a, 0x2b, 0x91, 0x68, 0x84, 0xaa, - 0xd0, 0x94, 0xfa, 0x65, 0x13, 0x8b, 0x48, 0xee, 0x33, 0xe6, 0x2b, 0xeb, 0x97, 0x0d, 0x9d, 0x25, - 0x1c, 0x93, 0x44, 0x33, 0x72, 0x1b, 0x0a, 0xca, 0x31, 0xa9, 0xfb, 0xb8, 0xa8, 0x5c, 0x74, 0x84, - 0x9e, 0x2a, 0x2c, 0x17, 0x49, 0xbf, 0x15, 0x4f, 0x93, 0x4b, 0x4e, 0xf3, 0x53, 0x28, 0x86, 0xce, - 0x27, 0x8d, 0x12, 0x72, 0x86, 0x4b, 0xcb, 0x50, 0x42, 0x4d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, - 0x9e, 0xc3, 0x2c, 0x33, 0xbe, 0x82, 0x38, 0x67, 0xd1, 0xa8, 0xc8, 0x81, 0x7b, 0xe1, 0xfd, 0xd2, - 0xdf, 0x83, 0x75, 0xb9, 0xd6, 0xb9, 0x2e, 0x6e, 0x1e, 0xb4, 0xfe, 0x43, 0x83, 0x62, 0x08, 0x1f, - 0x73, 0x99, 0x52, 0x9b, 0xc8, 0x7d, 0xdd, 0x4d, 0xbc, 0x7c, 0x97, 0xf4, 0x2e, 0x10, 0xb4, 0x14, - 0x73, 0xec, 0x72, 0xdb, 0xe9, 0x99, 0xf2, 0x2c, 0x64, 0x24, 0x58, 0xc5, 0x91, 0x63, 0x1c, 0x38, - 0x12, 0xfd, 0x6f, 0x5f, 0x86, 0x72, 0xa2, 0xca, 0x45, 0x0a, 0xb0, 0xfa, 0x80, 0x3d, 0xab, 0xae, - 0x90, 0x32, 0x14, 0x0c, 0x86, 0x35, 0x82, 0xaa, 0xb6, 0xff, 0x55, 0x01, 0x2a, 0x07, 0xcd, 0xc3, - 0xf6, 0x81, 0xe7, 0x0d, 0xec, 0x2e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x98, 0x27, 0x67, 0x78, 0xdf, - 0xa9, 0x67, 0x29, 0x38, 0x11, 0x03, 0xd6, 0x30, 0x9d, 0x26, 0x59, 0x9e, 0x7d, 0xea, 0x99, 0xea, - 0x50, 0x62, 0x91, 0x68, 0x70, 0x19, 0x5e, 0x83, 0xea, 0x59, 0x8a, 0x53, 0xe4, 0x33, 0x28, 0xc5, - 0x79, 0x72, 0xd6, 0x37, 0xa2, 0x7a, 0xe6, 0xb2, 0x95, 0x90, 0x1f, 0x67, 0x06, 0x59, 0x5f, 0x48, - 0xea, 0x99, 0xeb, 0x35, 0xe4, 0x09, 0x14, 0xc2, 0x1c, 0x2c, 0xdb, 0x2b, 0x4e, 0x3d, 0x63, 0x49, - 0x49, 0x1c, 0x9f, 0x4c, 0x9d, 0xb3, 0x3c, 0x55, 0xd5, 0x33, 0xd5, 0xcd, 0xc8, 0x63, 0x58, 0x57, - 0xc1, 0x6f, 0xa6, 0xf7, 0x99, 0x7a, 0xb6, 0x42, 0x91, 0x50, 0x72, 0x5c, 0x9c, 0xc8, 0xfa, 0x3c, - 0x57, 0xcf, 0x5c, 0x30, 0x24, 0x14, 0x20, 0x91, 0x4f, 0x67, 0x7e, 0x77, 0xab, 0x67, 0x2f, 0x04, - 0x92, 0x1f, 0x43, 0x31, 0xca, 0x9a, 0x32, 0xbe, 0x7f, 0xd5, 0xb3, 0xd6, 0xe2, 0x9a, 
0xed, 0xff, - 0xfc, 0x6d, 0x5b, 0xfb, 0xed, 0xe9, 0xb6, 0xf6, 0xc5, 0xe9, 0xb6, 0xf6, 0xe5, 0xe9, 0xb6, 0xf6, - 0xa7, 0xd3, 0x6d, 0xed, 0xaf, 0xa7, 0xdb, 0xda, 0x1f, 0xfe, 0xbe, 0xad, 0xfd, 0xe8, 0x9d, 0x9e, - 0xcd, 0xfb, 0xa3, 0x4e, 0xa3, 0xeb, 0x0e, 0xf7, 0x62, 0x81, 0xc9, 0xcf, 0xf8, 0x51, 0xbb, 0xb3, - 0x8e, 0x0e, 0xeb, 0xdb, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xce, 0x64, 0xb9, 0xe4, 0xe9, 0x1e, + 0x15, 0xde, 0xd1, 0x6a, 0x57, 0xd2, 0xd3, 0xfe, 0xc8, 0x6d, 0x27, 0x91, 0x85, 0xb3, 0xeb, 0x9a, + 0x8d, 0xed, 0x75, 0x12, 0xb4, 0x61, 0xa9, 0x50, 0x31, 0x76, 0x85, 0x5a, 0xad, 0x1d, 0xa4, 0x8a, + 0xed, 0x6c, 0xc6, 0xf6, 0x62, 0xa0, 0x2a, 0x53, 0x2d, 0x4d, 0x5b, 0x9a, 0x5a, 0x69, 0x66, 0x32, + 0xd3, 0x92, 0x25, 0x8a, 0x3b, 0x45, 0x15, 0x07, 0x2e, 0x54, 0x71, 0xe1, 0xce, 0x91, 0x03, 0x87, + 0x1c, 0x39, 0xe6, 0xc0, 0x81, 0x03, 0x67, 0x03, 0x0b, 0x27, 0x2a, 0x47, 0x8a, 0xe2, 0x48, 0xf5, + 0xeb, 0x9e, 0x3f, 0xad, 0xb4, 0x1a, 0x07, 0xdf, 0xb8, 0x48, 0xd3, 0x3d, 0xef, 0xbd, 0xee, 0x7e, + 0xfd, 0xde, 0xfb, 0xde, 0x7b, 0x03, 0xaf, 0xd3, 0x76, 0xc7, 0xde, 0xe3, 0x13, 0x8f, 0x05, 0xf2, + 0xb7, 0xee, 0xf9, 0x2e, 0x77, 0xc9, 0x6b, 0x9c, 0x39, 0x16, 0xf3, 0x07, 0xb6, 0xc3, 0xeb, 0x82, + 0xa4, 0x8e, 0x2f, 0x6b, 0xd7, 0x79, 0xcf, 0xf6, 0x2d, 0xd3, 0xa3, 0x3e, 0x9f, 0xec, 0x21, 0xe5, + 0x5e, 0xd7, 0xed, 0xba, 0xf1, 0x93, 0x64, 0xaf, 0xd5, 0x3a, 0xfe, 0xc4, 0xe3, 0xee, 0xde, 0x80, + 0xf9, 0x27, 0x7d, 0xa6, 0xfe, 0xd4, 0xbb, 0x8b, 0x7d, 0xbb, 0x1d, 0xec, 0x9d, 0x8c, 0x92, 0xeb, + 0xd5, 0xb6, 0xbb, 0xae, 0xdb, 0xed, 0x33, 0x29, 0xb3, 0x3d, 0x7c, 0xb6, 0xc7, 0xed, 0x01, 0x0b, + 0x38, 0x1d, 0x78, 0x8a, 0x60, 0x6b, 0x9a, 0xc0, 0x1a, 0xfa, 0x94, 0xdb, 0xae, 0x23, 0xdf, 0xeb, + 0xff, 0x5e, 0x81, 0x82, 0xc1, 0x3e, 0x1f, 0xb2, 0x80, 0x93, 0x0f, 0x20, 0xcf, 0x3a, 0x3d, 0xb7, + 0x9a, 0xbb, 0xaa, 0xed, 0x96, 0xf7, 0xf5, 0xfa, 0xcc, 0xb3, 0xd4, 0x15, 0xf5, 0xbd, 0x4e, 0xcf, + 0x6d, 0x2e, 0x19, 0xc8, 0x41, 0x6e, 0xc3, 0xca, 0xb3, 0xfe, 0x30, 0xe8, 0x55, 0x97, 0x91, 0x75, + 0xe7, 0x7c, 0xd6, 0x8f, 
0x04, 0x69, 0x73, 0xc9, 0x90, 0x3c, 0x62, 0x59, 0xdb, 0x79, 0xe6, 0x56, + 0xf3, 0x59, 0x96, 0x6d, 0x39, 0xcf, 0x70, 0x59, 0xc1, 0x41, 0x9a, 0x00, 0x01, 0xe3, 0xa6, 0xeb, + 0x89, 0x03, 0x55, 0x57, 0x90, 0xff, 0xc6, 0xf9, 0xfc, 0x8f, 0x18, 0xff, 0x04, 0xc9, 0x9b, 0x4b, + 0x46, 0x29, 0x08, 0x07, 0x42, 0x92, 0xed, 0xd8, 0xdc, 0xec, 0xf4, 0xa8, 0xed, 0x54, 0x57, 0xb3, + 0x48, 0x6a, 0x39, 0x36, 0x3f, 0x14, 0xe4, 0x42, 0x92, 0x1d, 0x0e, 0x84, 0x2a, 0x3e, 0x1f, 0x32, + 0x7f, 0x52, 0x2d, 0x64, 0x51, 0xc5, 0xa7, 0x82, 0x54, 0xa8, 0x02, 0x79, 0xc8, 0xc7, 0x50, 0x6e, + 0xb3, 0xae, 0xed, 0x98, 0xed, 0xbe, 0xdb, 0x39, 0xa9, 0x16, 0x51, 0xc4, 0xee, 0xf9, 0x22, 0x1a, + 0x82, 0xa1, 0x21, 0xe8, 0x9b, 0x4b, 0x06, 0xb4, 0xa3, 0x11, 0x69, 0x40, 0xb1, 0xd3, 0x63, 0x9d, + 0x13, 0x93, 0x8f, 0xab, 0x25, 0x94, 0x74, 0xed, 0x7c, 0x49, 0x87, 0x82, 0xfa, 0xf1, 0xb8, 0xb9, + 0x64, 0x14, 0x3a, 0xf2, 0x51, 0xe8, 0xc5, 0x62, 0x7d, 0x7b, 0xc4, 0x7c, 0x21, 0xe5, 0x62, 0x16, + 0xbd, 0xdc, 0x95, 0xf4, 0x28, 0xa7, 0x64, 0x85, 0x03, 0x72, 0x0f, 0x4a, 0xcc, 0xb1, 0xd4, 0xc1, + 0xca, 0x28, 0xe8, 0xfa, 0x02, 0x0b, 0x73, 0xac, 0xf0, 0x58, 0x45, 0xa6, 0x9e, 0xc9, 0x87, 0xb0, + 0xda, 0x71, 0x07, 0x03, 0x9b, 0x57, 0xd7, 0x50, 0xc6, 0x5b, 0x0b, 0x8e, 0x84, 0xb4, 0xcd, 0x25, + 0x43, 0x71, 0x35, 0x0a, 0xb0, 0x32, 0xa2, 0xfd, 0x21, 0xd3, 0x6f, 0x40, 0x39, 0x61, 0xc9, 0xa4, + 0x0a, 0x85, 0x01, 0x0b, 0x02, 0xda, 0x65, 0x55, 0xed, 0xaa, 0xb6, 0x5b, 0x32, 0xc2, 0xa1, 0xbe, + 0x01, 0x6b, 0x49, 0xbb, 0xd5, 0x07, 0x11, 0xa3, 0xb0, 0x45, 0xc1, 0x38, 0x62, 0x7e, 0x20, 0x0c, + 0x50, 0x31, 0xaa, 0x21, 0xd9, 0x81, 0x75, 0x3c, 0xad, 0x19, 0xbe, 0x17, 0x7e, 0x95, 0x37, 0xd6, + 0x70, 0xf2, 0x58, 0x11, 0x6d, 0x43, 0xd9, 0xdb, 0xf7, 0x22, 0x92, 0x65, 0x24, 0x01, 0x6f, 0xdf, + 0x53, 0x04, 0xfa, 0x77, 0xa1, 0x32, 0x6d, 0xba, 0xa4, 0x02, 0xcb, 0x27, 0x6c, 0xa2, 0xd6, 0x13, + 0x8f, 0xe4, 0x92, 0x3a, 0x16, 0xae, 0x51, 0x32, 0xd4, 0x19, 0x7f, 0x97, 0x8b, 0x98, 0x23, 0x6b, + 0x15, 0xee, 0x26, 0x82, 0x04, 0x72, 0x97, 0xf7, 0x6b, 0x75, 
0x19, 0x20, 0xea, 0x61, 0x80, 0xa8, + 0x3f, 0x0e, 0x23, 0x48, 0xa3, 0xf8, 0xe5, 0x8b, 0xed, 0xa5, 0x5f, 0xfe, 0x65, 0x5b, 0x33, 0x90, + 0x83, 0x5c, 0x16, 0x06, 0x45, 0x6d, 0xc7, 0xb4, 0x2d, 0xb5, 0x4e, 0x01, 0xc7, 0x2d, 0x8b, 0x7c, + 0x0a, 0x95, 0x8e, 0xeb, 0x04, 0xcc, 0x09, 0x86, 0x81, 0x08, 0x73, 0x74, 0x10, 0xa8, 0x58, 0x30, + 0xef, 0x92, 0x0f, 0x43, 0xf2, 0x23, 0xa4, 0x36, 0x36, 0x3b, 0xe9, 0x09, 0x72, 0x1f, 0x60, 0x44, + 0xfb, 0xb6, 0x45, 0xb9, 0xeb, 0x07, 0xd5, 0xfc, 0xd5, 0xe5, 0x73, 0x84, 0x1d, 0x87, 0x84, 0x4f, + 0x3c, 0x8b, 0x72, 0xd6, 0xc8, 0x8b, 0x9d, 0x1b, 0x09, 0x7e, 0x72, 0x1d, 0x36, 0xa9, 0xe7, 0x99, + 0x01, 0xa7, 0x9c, 0x99, 0xed, 0x09, 0x67, 0x01, 0xc6, 0x8b, 0x35, 0x63, 0x9d, 0x7a, 0xde, 0x23, + 0x31, 0xdb, 0x10, 0x93, 0xba, 0x15, 0xdd, 0x36, 0xba, 0x26, 0x21, 0x90, 0xb7, 0x28, 0xa7, 0xa8, + 0xad, 0x35, 0x03, 0x9f, 0xc5, 0x9c, 0x47, 0x79, 0x4f, 0xe9, 0x00, 0x9f, 0xc9, 0xeb, 0xb0, 0xda, + 0x63, 0x76, 0xb7, 0xc7, 0xf1, 0xd8, 0xcb, 0x86, 0x1a, 0x89, 0x8b, 0xf1, 0x7c, 0x77, 0xc4, 0x30, + 0xba, 0x15, 0x0d, 0x39, 0xd0, 0x7f, 0x95, 0x83, 0x0b, 0x67, 0xdc, 0x57, 0xc8, 0xed, 0xd1, 0xa0, + 0x17, 0xae, 0x25, 0x9e, 0xc9, 0x6d, 0x21, 0x97, 0x5a, 0xcc, 0x57, 0x51, 0xf9, 0xcd, 0x39, 0x1a, + 0x68, 0x22, 0x91, 0x3a, 0xb8, 0x62, 0x21, 0x4f, 0xa0, 0xd2, 0xa7, 0x01, 0x37, 0xa5, 0xed, 0x9b, + 0x18, 0x65, 0x97, 0xcf, 0x8d, 0x04, 0xf7, 0x69, 0xe8, 0x33, 0xc2, 0xb8, 0x95, 0xb8, 0x8d, 0x7e, + 0x6a, 0x96, 0x3c, 0x85, 0x4b, 0xed, 0xc9, 0x4f, 0xa8, 0xc3, 0x6d, 0x87, 0x99, 0x67, 0xee, 0x68, + 0x7b, 0x8e, 0xe8, 0x7b, 0x23, 0xdb, 0x62, 0x4e, 0x27, 0xbc, 0x9c, 0x8b, 0x91, 0x88, 0xe8, 0xf2, + 0x02, 0xfd, 0x29, 0x6c, 0xa4, 0x63, 0x11, 0xd9, 0x80, 0x1c, 0x1f, 0x2b, 0x8d, 0xe4, 0xf8, 0x98, + 0x7c, 0x07, 0xf2, 0x42, 0x1c, 0x6a, 0x63, 0x63, 0x2e, 0x58, 0x28, 0xee, 0xc7, 0x13, 0x8f, 0x19, + 0x48, 0xaf, 0xeb, 0x91, 0x27, 0x44, 0xf1, 0x69, 0x5a, 0xb6, 0x7e, 0x13, 0x36, 0xa7, 0x42, 0x4f, + 0xe2, 0x5a, 0xb5, 0xe4, 0xb5, 0xea, 0x9b, 0xb0, 0x9e, 0x8a, 0x30, 0xfa, 0x1f, 0x57, 0xa1, 0x68, + 
0xb0, 0xc0, 0x13, 0x46, 0x4c, 0x9a, 0x50, 0x62, 0xe3, 0x0e, 0x93, 0xb0, 0xa4, 0x2d, 0x08, 0xe2, + 0x92, 0xe7, 0x5e, 0x48, 0x2f, 0xa2, 0x66, 0xc4, 0x4c, 0x6e, 0xa5, 0x20, 0x79, 0x67, 0x91, 0x90, + 0x24, 0x26, 0xdf, 0x49, 0x63, 0xf2, 0x5b, 0x0b, 0x78, 0xa7, 0x40, 0xf9, 0x56, 0x0a, 0x94, 0x17, + 0x2d, 0x9c, 0x42, 0xe5, 0xd6, 0x0c, 0x54, 0x5e, 0x74, 0xfc, 0x39, 0xb0, 0xdc, 0x9a, 0x01, 0xcb, + 0xbb, 0x0b, 0xf7, 0x32, 0x13, 0x97, 0xef, 0xa4, 0x71, 0x79, 0x91, 0x3a, 0xa6, 0x80, 0xf9, 0xfe, + 0x2c, 0x60, 0xbe, 0xb9, 0x40, 0xc6, 0x5c, 0x64, 0x3e, 0x3c, 0x83, 0xcc, 0xd7, 0x17, 0x88, 0x9a, + 0x01, 0xcd, 0xad, 0x14, 0x34, 0x43, 0x26, 0xdd, 0xcc, 0xc1, 0xe6, 0x8f, 0xce, 0x62, 0xf3, 0x8d, + 0x45, 0xa6, 0x36, 0x0b, 0x9c, 0xbf, 0x37, 0x05, 0xce, 0xd7, 0x16, 0x9d, 0x6a, 0x2e, 0x3a, 0xdf, + 0x14, 0xf1, 0x71, 0xca, 0x33, 0x44, 0x2c, 0x65, 0xbe, 0xef, 0xfa, 0x0a, 0xf8, 0xe4, 0x40, 0xdf, + 0x15, 0x11, 0x3b, 0xb6, 0xff, 0x73, 0x90, 0x1c, 0x9d, 0x36, 0x61, 0xed, 0xfa, 0x17, 0x5a, 0xcc, + 0x8b, 0x91, 0x2d, 0x19, 0xed, 0x4b, 0x2a, 0xda, 0x27, 0x00, 0x3e, 0x97, 0x06, 0xf8, 0x6d, 0x28, + 0x0b, 0x4c, 0x99, 0xc2, 0x6e, 0xea, 0x85, 0xd8, 0x4d, 0xde, 0x86, 0x0b, 0x18, 0x7f, 0x65, 0x1a, + 0xa0, 0x02, 0x49, 0x1e, 0x03, 0xc9, 0xa6, 0x78, 0x21, 0x35, 0x28, 0x81, 0xe2, 0x9b, 0x70, 0x31, + 0x41, 0x2b, 0xe4, 0x22, 0x16, 0x48, 0x90, 0xaa, 0x44, 0xd4, 0x07, 0x9e, 0xd7, 0xa4, 0x41, 0x4f, + 0x7f, 0x10, 0x2b, 0x28, 0xce, 0x0b, 0x08, 0xe4, 0x3b, 0xae, 0x25, 0xcf, 0xbd, 0x6e, 0xe0, 0xb3, + 0xc8, 0x15, 0xfa, 0x6e, 0x17, 0x37, 0x57, 0x32, 0xc4, 0xa3, 0xa0, 0x8a, 0x5c, 0xbb, 0x24, 0x7d, + 0x56, 0xff, 0xbd, 0x16, 0xcb, 0x8b, 0x53, 0x85, 0x59, 0xa8, 0xae, 0xbd, 0x4a, 0x54, 0xcf, 0xfd, + 0x6f, 0xa8, 0xae, 0xff, 0x4b, 0x8b, 0xaf, 0x34, 0xc2, 0xeb, 0xaf, 0xa7, 0x02, 0x61, 0x5d, 0xb6, + 0x63, 0xb1, 0x31, 0xaa, 0x7c, 0xd9, 0x90, 0x83, 0x30, 0xd5, 0x5a, 0xc5, 0x6b, 0x48, 0xa7, 0x5a, + 0x05, 0x9c, 0x93, 0x03, 0xf2, 0x3e, 0xe2, 0xbc, 0xfb, 0x4c, 0x85, 0x86, 0x14, 0x08, 0xca, 0xa2, + 0xae, 0xae, 0xaa, 0xb9, 0x23, 0x41, 
0x66, 0x48, 0xea, 0x04, 0xbe, 0x94, 0x52, 0x69, 0xc3, 0x15, + 0x28, 0x89, 0xad, 0x07, 0x1e, 0xed, 0x30, 0xf4, 0xed, 0x92, 0x11, 0x4f, 0xe8, 0x16, 0x90, 0xb3, + 0x31, 0x86, 0x3c, 0x84, 0x55, 0x36, 0x62, 0x0e, 0x17, 0x77, 0x24, 0xd4, 0x7a, 0x65, 0x2e, 0x10, + 0x33, 0x87, 0x37, 0xaa, 0x42, 0x99, 0xff, 0x7c, 0xb1, 0x5d, 0x91, 0x3c, 0xef, 0xba, 0x03, 0x9b, + 0xb3, 0x81, 0xc7, 0x27, 0x86, 0x92, 0xa2, 0xff, 0x2c, 0x27, 0xf0, 0x30, 0x15, 0x7f, 0x66, 0xaa, + 0x37, 0x74, 0x9a, 0x5c, 0x22, 0x45, 0xca, 0xa6, 0xf2, 0x37, 0x01, 0xba, 0x34, 0x30, 0x9f, 0x53, + 0x87, 0x33, 0x4b, 0xe9, 0xbd, 0xd4, 0xa5, 0xc1, 0x0f, 0x70, 0x42, 0xe4, 0x9b, 0xe2, 0xf5, 0x30, + 0x60, 0x16, 0x5e, 0xc0, 0xb2, 0x51, 0xe8, 0xd2, 0xe0, 0x49, 0xc0, 0xac, 0xc4, 0x59, 0x0b, 0xaf, + 0xe2, 0xac, 0x69, 0x7d, 0x17, 0xa7, 0xf5, 0xfd, 0xf3, 0x5c, 0xec, 0x1d, 0x71, 0xfa, 0xf0, 0xff, + 0xa9, 0x8b, 0xdf, 0x60, 0x4d, 0x91, 0x06, 0x01, 0xf2, 0x43, 0xb8, 0x10, 0x79, 0xa5, 0x39, 0x44, + 0x6f, 0x0d, 0xad, 0xf0, 0xe5, 0x9c, 0xbb, 0x32, 0x4a, 0x4f, 0x07, 0xe4, 0x33, 0x78, 0x63, 0x2a, + 0x06, 0x45, 0x0b, 0xe4, 0x5e, 0x2a, 0x14, 0xbd, 0x96, 0x0e, 0x45, 0xa1, 0xfc, 0x58, 0x7b, 0xcb, + 0xaf, 0xc4, 0x6b, 0x5a, 0x22, 0x85, 0x4d, 0xc2, 0xdb, 0x4c, 0x9b, 0xd8, 0x81, 0x75, 0x9f, 0x71, + 0x51, 0x4b, 0xa5, 0xaa, 0x86, 0x35, 0x39, 0x29, 0x21, 0x41, 0xff, 0xb3, 0x06, 0x9b, 0x53, 0xa7, + 0x20, 0x1f, 0xc0, 0x8a, 0x84, 0x69, 0xed, 0xdc, 0x6e, 0x09, 0x5e, 0x8b, 0x3a, 0xb8, 0x64, 0x20, + 0x07, 0x50, 0x64, 0x2a, 0x05, 0x57, 0x9a, 0xbb, 0xb6, 0x20, 0x53, 0x57, 0xfc, 0x11, 0x1b, 0xb9, + 0x0b, 0xa5, 0xe8, 0x7e, 0x16, 0x94, 0x77, 0xd1, 0xf5, 0x2a, 0x21, 0x31, 0xa3, 0x7e, 0x08, 0xe5, + 0xc4, 0xf6, 0xc8, 0x37, 0xa0, 0x34, 0xa0, 0x63, 0x55, 0x93, 0xc9, 0x2c, 0xbb, 0x38, 0xa0, 0x63, + 0x2c, 0xc7, 0xc8, 0x1b, 0x50, 0x10, 0x2f, 0xbb, 0x54, 0xde, 0xf6, 0xb2, 0xb1, 0x3a, 0xa0, 0xe3, + 0xef, 0xd3, 0x40, 0xff, 0x85, 0x06, 0x1b, 0xe9, 0x7d, 0x92, 0x77, 0x80, 0x08, 0x5a, 0xda, 0x65, + 0xa6, 0x33, 0x1c, 0x48, 0x20, 0x0d, 0x25, 0x6e, 0x0e, 0xe8, 0xf8, 0xa0, 
0xcb, 0x1e, 0x0e, 0x07, + 0xb8, 0x74, 0x40, 0x1e, 0x40, 0x25, 0x24, 0x0e, 0x3b, 0x62, 0x4a, 0x2b, 0x97, 0xcf, 0x54, 0xc4, + 0x77, 0x15, 0x81, 0x2c, 0x88, 0x7f, 0x2d, 0x0a, 0xe2, 0x0d, 0x29, 0x2f, 0x7c, 0xa3, 0xbf, 0x0f, + 0x9b, 0x53, 0x27, 0x26, 0x3a, 0xac, 0x7b, 0xc3, 0xb6, 0x79, 0xc2, 0x26, 0x26, 0xaa, 0x04, 0xfd, + 0xa1, 0x64, 0x94, 0xbd, 0x61, 0xfb, 0x63, 0x36, 0x11, 0xa5, 0x49, 0xa0, 0x77, 0x60, 0x23, 0x5d, + 0x71, 0x09, 0x74, 0xf1, 0xdd, 0xa1, 0x63, 0xe1, 0xbe, 0x57, 0x0c, 0x39, 0x20, 0xb7, 0x61, 0x65, + 0xe4, 0x4a, 0x93, 0x3f, 0xaf, 0xc4, 0x3a, 0x76, 0x39, 0x4b, 0xd4, 0x6d, 0x92, 0x47, 0x0f, 0x60, + 0x05, 0x8d, 0x57, 0x18, 0x22, 0xd6, 0x4e, 0x2a, 0xbb, 0x11, 0xcf, 0xe4, 0x18, 0x80, 0x72, 0xee, + 0xdb, 0xed, 0x61, 0x2c, 0xbe, 0x9a, 0x14, 0xdf, 0xb7, 0xdb, 0x41, 0xfd, 0x64, 0x54, 0x3f, 0xa2, + 0xb6, 0xdf, 0xb8, 0xa2, 0xcc, 0xff, 0x52, 0xcc, 0x93, 0x70, 0x81, 0x84, 0x24, 0xfd, 0xab, 0x3c, + 0xac, 0xca, 0x9a, 0x94, 0x7c, 0x98, 0xee, 0x90, 0x94, 0xf7, 0xb7, 0xe6, 0x6d, 0x5f, 0x52, 0xa9, + 0xdd, 0x47, 0x69, 0xd6, 0xf5, 0xe9, 0xb6, 0x43, 0xa3, 0x7c, 0xfa, 0x62, 0xbb, 0x80, 0x29, 0x4a, + 0xeb, 0x6e, 0xdc, 0x83, 0x98, 0x57, 0x82, 0x87, 0x0d, 0x8f, 0xfc, 0x4b, 0x37, 0x3c, 0x9a, 0xb0, + 0x9e, 0xc8, 0xc9, 0x6c, 0x4b, 0x15, 0x33, 0x5b, 0xe7, 0x39, 0x5d, 0xeb, 0xae, 0xda, 0x7f, 0x39, + 0xca, 0xd9, 0x5a, 0x16, 0xd9, 0x4d, 0x57, 0xe2, 0x98, 0xda, 0xc9, 0x9c, 0x22, 0x51, 0x5c, 0x8b, + 0xc4, 0x4e, 0xb8, 0x83, 0x88, 0x10, 0x92, 0x44, 0xa6, 0x18, 0x45, 0x31, 0x81, 0x2f, 0x6f, 0xc0, + 0x66, 0x9c, 0xfd, 0x48, 0x92, 0xa2, 0x94, 0x12, 0x4f, 0x23, 0xe1, 0x7b, 0x70, 0xc9, 0x61, 0x63, + 0x6e, 0x4e, 0x53, 0x97, 0x90, 0x9a, 0x88, 0x77, 0xc7, 0x69, 0x8e, 0x6b, 0xb0, 0x11, 0xc7, 0x59, + 0xa4, 0x05, 0xd9, 0x1f, 0x89, 0x66, 0x91, 0xec, 0x32, 0x14, 0xa3, 0xdc, 0xb4, 0x8c, 0x04, 0x05, + 0x2a, 0x53, 0xd2, 0x28, 0xdb, 0xf5, 0x59, 0x30, 0xec, 0x73, 0x25, 0x64, 0x0d, 0x69, 0x30, 0xdb, + 0x35, 0xe4, 0x3c, 0xd2, 0xee, 0xc0, 0x7a, 0x18, 0x55, 0x24, 0xdd, 0x3a, 0xd2, 0xad, 0x85, 0x93, + 0x48, 0x74, 
0x13, 0x2a, 0x9e, 0xef, 0x7a, 0x6e, 0xc0, 0x7c, 0x93, 0x5a, 0x96, 0xcf, 0x82, 0xa0, + 0xba, 0x21, 0xe5, 0x85, 0xf3, 0x07, 0x72, 0x5a, 0xff, 0x16, 0x14, 0xc2, 0xa4, 0xfb, 0x12, 0xac, + 0x34, 0xa2, 0x08, 0x99, 0x37, 0xe4, 0x40, 0x80, 0xf0, 0x81, 0xe7, 0xa9, 0x16, 0x9c, 0x78, 0xd4, + 0xfb, 0x50, 0x50, 0x17, 0x36, 0xb3, 0xf1, 0xf2, 0x00, 0xd6, 0x3c, 0xea, 0x8b, 0x63, 0x24, 0xdb, + 0x2f, 0xf3, 0xca, 0xc6, 0x23, 0xea, 0xf3, 0x47, 0x8c, 0xa7, 0xba, 0x30, 0x65, 0xe4, 0x97, 0x53, + 0xfa, 0x2d, 0x58, 0x4f, 0xd1, 0x88, 0x6d, 0x72, 0x97, 0xd3, 0x7e, 0xe8, 0xe8, 0x38, 0x88, 0x76, + 0x92, 0x8b, 0x77, 0xa2, 0xdf, 0x86, 0x52, 0x74, 0x57, 0xa2, 0x1a, 0x09, 0x55, 0xa1, 0x29, 0xf5, + 0xcb, 0x21, 0x76, 0x9a, 0xdc, 0xe7, 0xcc, 0x57, 0xd6, 0x2f, 0x07, 0x3a, 0x4b, 0x04, 0x26, 0x09, + 0x79, 0xe4, 0x0e, 0x14, 0x54, 0x60, 0x52, 0xfe, 0x38, 0xaf, 0xa7, 0x74, 0x84, 0x91, 0x2a, 0xec, + 0x29, 0xc9, 0xb8, 0x15, 0x2f, 0x93, 0x4b, 0x2e, 0xf3, 0x53, 0x28, 0x86, 0xc1, 0x27, 0x8d, 0x12, + 0x72, 0x85, 0xab, 0x8b, 0x50, 0x42, 0x2d, 0x12, 0x33, 0x0a, 0x6b, 0x0a, 0xec, 0xae, 0xc3, 0x2c, + 0x33, 0x76, 0x41, 0x5c, 0xb3, 0x68, 0x6c, 0xca, 0x17, 0xf7, 0x43, 0xff, 0xd2, 0xdf, 0x83, 0x55, + 0xb9, 0xd7, 0x99, 0x21, 0x6e, 0x06, 0xfe, 0xea, 0xff, 0xd0, 0xa0, 0x18, 0xc2, 0xc7, 0x4c, 0xa6, + 0xd4, 0x21, 0x72, 0x5f, 0xf7, 0x10, 0xaf, 0x3e, 0x24, 0xbd, 0x0b, 0x04, 0x2d, 0xc5, 0x1c, 0xb9, + 0xdc, 0x76, 0xba, 0xa6, 0xbc, 0x0b, 0x99, 0x2e, 0x56, 0xf0, 0xcd, 0x31, 0xbe, 0x38, 0x12, 0xf3, + 0x6f, 0xef, 0x40, 0x39, 0xd1, 0x0a, 0x23, 0x05, 0x58, 0x7e, 0xc8, 0x9e, 0x57, 0x96, 0x48, 0x19, + 0x0a, 0x06, 0xc3, 0x46, 0x42, 0x45, 0xdb, 0xff, 0xaa, 0x00, 0x9b, 0x07, 0x8d, 0xc3, 0xd6, 0x81, + 0xe7, 0xf5, 0xed, 0x0e, 0xe2, 0x19, 0xf9, 0x04, 0xf2, 0x58, 0x4c, 0x67, 0xf8, 0x08, 0x54, 0xcb, + 0xd2, 0x95, 0x22, 0x06, 0xac, 0x60, 0xcd, 0x4d, 0xb2, 0x7c, 0x1b, 0xaa, 0x65, 0x6a, 0x56, 0x89, + 0x4d, 0xa2, 0xc1, 0x65, 0xf8, 0x64, 0x54, 0xcb, 0xd2, 0xc1, 0x22, 0x9f, 0x41, 0x29, 0x2e, 0xa6, + 0xb3, 0x7e, 0x48, 0xaa, 0x65, 0xee, 0x6d, 0x09, 
0xf9, 0x71, 0xf9, 0x90, 0xf5, 0x33, 0x4a, 0x2d, + 0x73, 0x53, 0x87, 0x3c, 0x85, 0x42, 0x58, 0xa8, 0x65, 0xfb, 0xd4, 0x53, 0xcb, 0xd8, 0x77, 0x12, + 0xd7, 0x27, 0xeb, 0xeb, 0x2c, 0xdf, 0xb3, 0x6a, 0x99, 0x9a, 0x6b, 0xe4, 0x09, 0xac, 0xaa, 0x0c, + 0x39, 0xd3, 0x47, 0x9c, 0x5a, 0xb6, 0x6e, 0x92, 0x50, 0x72, 0xdc, 0xc1, 0xc8, 0xfa, 0x0d, 0xaf, + 0x96, 0xb9, 0xab, 0x48, 0x28, 0x40, 0xa2, 0xe8, 0xce, 0xfc, 0x71, 0xae, 0x96, 0xbd, 0x5b, 0x48, + 0x7e, 0x0c, 0xc5, 0xa8, 0xb4, 0xca, 0xf8, 0x91, 0xac, 0x96, 0xb5, 0x61, 0xd7, 0x68, 0xfd, 0xe7, + 0x6f, 0x5b, 0xda, 0x6f, 0x4f, 0xb7, 0xb4, 0x2f, 0x4e, 0xb7, 0xb4, 0x2f, 0x4f, 0xb7, 0xb4, 0x3f, + 0x9d, 0x6e, 0x69, 0x7f, 0x3d, 0xdd, 0xd2, 0xfe, 0xf0, 0xf7, 0x2d, 0xed, 0x47, 0xef, 0x74, 0x6d, + 0xde, 0x1b, 0xb6, 0xeb, 0x1d, 0x77, 0xb0, 0x17, 0x0b, 0x4c, 0x3e, 0xc6, 0x5f, 0xbe, 0xdb, 0xab, + 0x18, 0xb0, 0xbe, 0xfd, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0c, 0x66, 0x8a, 0xe9, 0x0e, 0x1f, 0x00, 0x00, } @@ -4512,6 +4521,9 @@ func (this *ResponseCommit) Equal(that interface{}) bool { if !bytes.Equal(this.Data, that1.Data) { return false } + if this.RetainHeight != that1.RetainHeight { + return false + } if !bytes.Equal(this.XXX_unrecognized, that1.XXX_unrecognized) { return false } @@ -7143,6 +7155,11 @@ func (m *ResponseCommit) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.RetainHeight != 0 { + i = encodeVarintTypes(dAtA, i, uint64(m.RetainHeight)) + i-- + dAtA[i] = 0x18 + } if len(m.Data) > 0 { i -= len(m.Data) copy(dAtA[i:], m.Data) @@ -8479,8 +8496,12 @@ func NewPopulatedResponseCommit(r randyTypes, easy bool) *ResponseCommit { for i := 0; i < v30; i++ { this.Data[i] = byte(r.Intn(256)) } + this.RetainHeight = int64(r.Int63()) + if r.Intn(2) == 0 { + this.RetainHeight *= -1 + } if !easy && r.Intn(10) != 0 { - this.XXX_unrecognized = randUnrecognizedTypes(r, 3) + this.XXX_unrecognized = randUnrecognizedTypes(r, 4) } return this } @@ -9665,6 +9686,9 @@ func (m 
*ResponseCommit) Size() (n int) { if l > 0 { n += 1 + l + sovTypes(uint64(l)) } + if m.RetainHeight != 0 { + n += 1 + sovTypes(uint64(m.RetainHeight)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -14046,6 +14070,25 @@ func (m *ResponseCommit) Unmarshal(dAtA []byte) error { m.Data = []byte{} } iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RetainHeight", wireType) + } + m.RetainHeight = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTypes + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.RetainHeight |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } default: iNdEx = preIndex skippy, err := skipTypes(dAtA[iNdEx:]) diff --git a/abci/types/types.proto b/abci/types/types.proto index 0d47ad9b3..351329de1 100644 --- a/abci/types/types.proto +++ b/abci/types/types.proto @@ -207,7 +207,8 @@ message ResponseEndBlock { message ResponseCommit { // reserve 1 - bytes data = 2; + bytes data = 2; + int64 retain_height = 3; } //---------------------------------------- diff --git a/blockchain/v0/codec.go b/blockchain/v0/codec.go index 4494f41aa..f023bbfa1 100644 --- a/blockchain/v0/codec.go +++ b/blockchain/v0/codec.go @@ -2,6 +2,7 @@ package v0 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v0/pool.go b/blockchain/v0/pool.go index 1931d7960..bd8165752 100644 --- a/blockchain/v0/pool.go +++ b/blockchain/v0/pool.go @@ -284,16 +284,17 @@ func (pool *BlockPool) MaxPeerHeight() int64 { return pool.maxPeerHeight } -// SetPeerHeight sets the peer's alleged blockchain height. -func (pool *BlockPool) SetPeerHeight(peerID p2p.ID, height int64) { +// SetPeerRange sets the peer's alleged blockchain base and height. 
+func (pool *BlockPool) SetPeerRange(peerID p2p.ID, base int64, height int64) { pool.mtx.Lock() defer pool.mtx.Unlock() peer := pool.peers[peerID] if peer != nil { + peer.base = base peer.height = height } else { - peer = newBPPeer(pool, peerID, height) + peer = newBPPeer(pool, peerID, base, height) peer.setLogger(pool.Logger.With("peer", peerID)) pool.peers[peerID] = peer } @@ -346,9 +347,9 @@ func (pool *BlockPool) updateMaxPeerHeight() { pool.maxPeerHeight = max } -// Pick an available peer with at least the given minHeight. +// Pick an available peer with the given height available. // If no peers are available, returns nil. -func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { +func (pool *BlockPool) pickIncrAvailablePeer(height int64) *bpPeer { pool.mtx.Lock() defer pool.mtx.Unlock() @@ -360,7 +361,7 @@ func (pool *BlockPool) pickIncrAvailablePeer(minHeight int64) *bpPeer { if peer.numPending >= maxPendingRequestsPerPeer { continue } - if peer.height < minHeight { + if height < peer.base || height > peer.height { continue } peer.incrPending() @@ -432,6 +433,7 @@ type bpPeer struct { didTimeout bool numPending int32 height int64 + base int64 pool *BlockPool id p2p.ID recvMonitor *flow.Monitor @@ -441,10 +443,11 @@ type bpPeer struct { logger log.Logger } -func newBPPeer(pool *BlockPool, peerID p2p.ID, height int64) *bpPeer { +func newBPPeer(pool *BlockPool, peerID p2p.ID, base int64, height int64) *bpPeer { peer := &bpPeer{ pool: pool, id: peerID, + base: base, height: height, numPending: 0, logger: log.NewNopLogger(), diff --git a/blockchain/v0/pool_test.go b/blockchain/v0/pool_test.go index 783ff2526..9a3dd299c 100644 --- a/blockchain/v0/pool_test.go +++ b/blockchain/v0/pool_test.go @@ -20,6 +20,7 @@ func init() { type testPeer struct { id p2p.ID + base int64 height int64 inputChan chan inputData //make sure each peer's data is sequential } @@ -67,7 +68,11 @@ func makePeers(numPeers int, minHeight, maxHeight int64) testPeers { for i := 0; 
i < numPeers; i++ { peerID := p2p.ID(tmrand.Str(12)) height := minHeight + tmrand.Int63n(maxHeight-minHeight) - peers[peerID] = testPeer{peerID, height, make(chan inputData, 10)} + base := minHeight + int64(i) + if base > height { + base = height + } + peers[peerID] = testPeer{peerID, base, height, make(chan inputData, 10)} } return peers } @@ -93,7 +98,7 @@ func TestBlockPoolBasic(t *testing.T) { // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) + pool.SetPeerRange(peer.id, peer.base, peer.height) } }() @@ -148,7 +153,7 @@ func TestBlockPoolTimeout(t *testing.T) { // Introduce each peer. go func() { for _, peer := range peers { - pool.SetPeerHeight(peer.id, peer.height) + pool.SetPeerRange(peer.id, peer.base, peer.height) } }() @@ -192,7 +197,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { for i := 0; i < 10; i++ { peerID := p2p.ID(fmt.Sprintf("%d", i+1)) height := int64(i + 1) - peers[peerID] = testPeer{peerID, height, make(chan inputData)} + peers[peerID] = testPeer{peerID, 0, height, make(chan inputData)} } requestsCh := make(chan BlockRequest) errorsCh := make(chan peerError) @@ -205,7 +210,7 @@ func TestBlockPoolRemovePeer(t *testing.T) { // add peers for peerID, peer := range peers { - pool.SetPeerHeight(peerID, peer.height) + pool.SetPeerRange(peerID, peer.base, peer.height) } assert.EqualValues(t, 10, pool.MaxPeerHeight()) diff --git a/blockchain/v0/reactor.go b/blockchain/v0/reactor.go index d47e892c2..a51b7949e 100644 --- a/blockchain/v0/reactor.go +++ b/blockchain/v0/reactor.go @@ -140,12 +140,15 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor by sending our state to peer. 
func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + }) peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. will try later in poolRoutine // peer is added to the pool once we receive the first - // bcStatusResponseMessage from the peer and call pool.SetPeerHeight + // bcStatusResponseMessage from the peer and call pool.SetPeerRange } // RemovePeer implements Reactor by removing peer from the pool. @@ -155,8 +158,6 @@ func (bcR *BlockchainReactor) RemovePeer(peer p2p.Peer, reason interface{}) { // respondToPeer loads a block and sends it to the requesting peer, // if we have it. Otherwise, we'll respond saying we don't have it. -// According to the Tendermint spec, if all nodes are honest, -// no node should be requesting for a block that's non-existent. func (bcR *BlockchainReactor) respondToPeer(msg *bcBlockRequestMessage, src p2p.Peer) (queued bool) { @@ -196,11 +197,15 @@ func (bcR *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) bcR.pool.AddBlock(src.ID(), msg.Block, len(msgBytes)) case *bcStatusRequestMessage: // Send peer our state. - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) - src.TrySend(BlockchainChannel, msgBytes) + src.TrySend(BlockchainChannel, cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Height: bcR.store.Height(), + Base: bcR.store.Base(), + })) case *bcStatusResponseMessage: // Got a peer status. Unverified. 
- bcR.pool.SetPeerHeight(src.ID(), msg.Height) + bcR.pool.SetPeerRange(src.ID(), msg.Base, msg.Height) + case *bcNoBlockResponseMessage: + bcR.Logger.Debug("Peer does not have requested block", "peer", src, "height", msg.Height) default: bcR.Logger.Error(fmt.Sprintf("Unknown message type %v", reflect.TypeOf(msg))) } @@ -310,7 +315,7 @@ FOR_LOOP: // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err := state.Validators.VerifyCommit( + err := state.Voters.VerifyCommit( chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("Error in validation", "err", err) @@ -338,7 +343,7 @@ FOR_LOOP: // TODO: same thing for app - but we would need a way to // get the hash without persisting the state var err error - state, err = bcR.blockExec.ApplyBlock(state, firstID, first) + state, _, err = bcR.blockExec.ApplyBlock(state, firstID, first) if err != nil { // TODO This is bad, are we zombie? panic(fmt.Sprintf("Failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) @@ -360,9 +365,12 @@ FOR_LOOP: } } -// BroadcastStatusRequest broadcasts `BlockStore` height. +// BroadcastStatusRequest broadcasts `BlockStore` base and height. func (bcR *BlockchainReactor) BroadcastStatusRequest() error { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) bcR.Switch.Broadcast(BlockchainChannel, msgBytes) return nil } @@ -446,34 +454,48 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } diff --git a/blockchain/v0/reactor_test.go b/blockchain/v0/reactor_test.go index 334cdf942..362e1a6b4 100644 --- a/blockchain/v0/reactor_test.go +++ b/blockchain/v0/reactor_test.go @@ -7,10 +7,10 @@ import ( "time" "github.com/pkg/errors" - "github.com/tendermint/tendermint/store" - "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -18,9 +18,9 @@ import ( "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/proxy" sm "github.com/tendermint/tendermint/state" + "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) var 
config *cfg.Config @@ -32,7 +32,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G val, privVal := types.RandValidator(randPower, minPower) validators[i] = types.GenesisValidator{ PubKey: val.PubKey, - Power: val.VotingPower, + Power: val.StakingPower, } privValidators[i] = privVal } @@ -112,7 +112,7 @@ func newBlockchainReactor( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } @@ -351,7 +351,7 @@ func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCo message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) return block } diff --git a/blockchain/v1/codec.go b/blockchain/v1/codec.go index 786584435..ce4f7dfab 100644 --- a/blockchain/v1/codec.go +++ b/blockchain/v1/codec.go @@ -2,6 +2,7 @@ package v1 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v1/peer.go b/blockchain/v1/peer.go index 02b1b4fc1..ad26585b3 100644 --- a/blockchain/v1/peer.go +++ b/blockchain/v1/peer.go @@ -27,6 +27,7 @@ type BpPeer struct { logger log.Logger ID p2p.ID + Base int64 // the peer reported base Height int64 // the peer reported height NumPendingBlockRequests int // number of requests still waiting for block responses blocks map[int64]*types.Block // blocks received or expected to be received from this peer @@ -38,14 +39,15 @@ type BpPeer struct { } // NewBpPeer creates a new peer. 
-func NewBpPeer( - peerID p2p.ID, height int64, onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer { +func NewBpPeer(peerID p2p.ID, base int64, height int64, + onErr func(err error, peerID p2p.ID), params *BpPeerParams) *BpPeer { if params == nil { params = BpPeerDefaultParams() } return &BpPeer{ ID: peerID, + Base: base, Height: height, blocks: make(map[int64]*types.Block, maxRequestsPerPeer), logger: log.NewNopLogger(), diff --git a/blockchain/v1/peer_test.go b/blockchain/v1/peer_test.go index aac03db7e..7aa17ef15 100644 --- a/blockchain/v1/peer_test.go +++ b/blockchain/v1/peer_test.go @@ -16,7 +16,7 @@ import ( func TestPeerMonitor(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, nil) peer.SetLogger(log.TestingLogger()) @@ -32,10 +32,10 @@ func TestPeerResetBlockResponseTimer(t *testing.T) { lastErr error // last generated error peerTestMtx sync.Mutex // modifications of ^^ variables are also done from timer handler goroutine ) - params := &BpPeerParams{timeout: 2 * time.Millisecond} + params := &BpPeerParams{timeout: 5 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) { peerTestMtx.Lock() defer peerTestMtx.Unlock() @@ -60,7 +60,7 @@ func TestPeerResetBlockResponseTimer(t *testing.T) { assert.NotNil(t, peer.blockResponseTimer) // let the timer expire and ... - time.Sleep(3 * time.Millisecond) + time.Sleep(7 * time.Millisecond) // ... 
check timer is not running checkByStoppingPeerTimer(t, peer, false) @@ -75,7 +75,7 @@ func TestPeerRequestSent(t *testing.T) { params := &BpPeerParams{timeout: 2 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) @@ -94,7 +94,7 @@ func TestPeerRequestSent(t *testing.T) { func TestPeerGetAndRemoveBlock(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 100, + p2p.ID(tmrand.Str(12)), 0, 100, func(err error, _ p2p.ID) {}, nil) @@ -142,7 +142,7 @@ func TestPeerGetAndRemoveBlock(t *testing.T) { func TestPeerAddBlock(t *testing.T) { peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 100, + p2p.ID(tmrand.Str(12)), 0, 100, func(err error, _ p2p.ID) {}, nil) @@ -189,7 +189,7 @@ func TestPeerOnErrFuncCalledDueToExpiration(t *testing.T) { ) peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) { peerTestMtx.Lock() defer peerTestMtx.Unlock() @@ -215,7 +215,7 @@ func TestPeerCheckRate(t *testing.T) { minRecvRate: int64(100), // 100 bytes/sec exponential moving average } peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) peer.SetLogger(log.TestingLogger()) @@ -249,7 +249,7 @@ func TestPeerCleanup(t *testing.T) { params := &BpPeerParams{timeout: 2 * time.Millisecond} peer := NewBpPeer( - p2p.ID(tmrand.Str(12)), 10, + p2p.ID(tmrand.Str(12)), 0, 10, func(err error, _ p2p.ID) {}, params) peer.SetLogger(log.TestingLogger()) diff --git a/blockchain/v1/pool.go b/blockchain/v1/pool.go index be2edbc21..27e0f3a04 100644 --- a/blockchain/v1/pool.go +++ b/blockchain/v1/pool.go @@ -66,9 +66,9 @@ func (pool *BlockPool) updateMaxPeerHeight() { pool.MaxPeerHeight = newMax } -// UpdatePeer adds a new peer or updates an existing peer with a new height. +// UpdatePeer adds a new peer or updates an existing peer with a new base and height. // If a peer is short it is not added. 
-func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { +func (pool *BlockPool) UpdatePeer(peerID p2p.ID, base int64, height int64) error { peer := pool.peers[peerID] @@ -79,10 +79,10 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { return errPeerTooShort } // Add new peer. - peer = NewBpPeer(peerID, height, pool.toBcR.sendPeerError, nil) + peer = NewBpPeer(peerID, base, height, pool.toBcR.sendPeerError, nil) peer.SetLogger(pool.logger.With("peer", peerID)) pool.peers[peerID] = peer - pool.logger.Info("added peer", "peerID", peerID, "height", height, "num_peers", len(pool.peers)) + pool.logger.Info("added peer", "peerID", peerID, "base", base, "height", height, "num_peers", len(pool.peers)) } else { // Check if peer is lowering its height. This is not allowed. if height < peer.Height { @@ -90,6 +90,7 @@ func (pool *BlockPool) UpdatePeer(peerID p2p.ID, height int64) error { return errPeerLowersItsHeight } // Update existing peer. + peer.Base = base peer.Height = height } @@ -213,7 +214,7 @@ func (pool *BlockPool) sendRequest(height int64) bool { if peer.NumPendingBlockRequests >= maxRequestsPerPeer { continue } - if peer.Height < height { + if peer.Base > height || peer.Height < height { continue } diff --git a/blockchain/v1/pool_test.go b/blockchain/v1/pool_test.go index e612eb43e..31b9d09f7 100644 --- a/blockchain/v1/pool_test.go +++ b/blockchain/v1/pool_test.go @@ -13,6 +13,7 @@ import ( type testPeer struct { id p2p.ID + base int64 height int64 } @@ -70,7 +71,7 @@ func makeBlockPool(bcr *testBcR, height int64, peers []BpPeer, blocks map[int64] if p.Height > maxH { maxH = p.Height } - bPool.peers[p.ID] = NewBpPeer(p.ID, p.Height, bcr.sendPeerError, nil) + bPool.peers[p.ID] = NewBpPeer(p.ID, p.Base, p.Height, bcr.sendPeerError, nil) bPool.peers[p.ID].SetLogger(bcr.logger) } @@ -93,6 +94,7 @@ func assertPeerSetsEquivalent(t *testing.T, set1 map[p2p.ID]*BpPeer, set2 map[p2 assert.NotNil(t, peer2) assert.Equal(t, 
peer1.NumPendingBlockRequests, peer2.NumPendingBlockRequests) assert.Equal(t, peer1.Height, peer2.Height) + assert.Equal(t, peer1.Base, peer2.Base) assert.Equal(t, len(peer1.blocks), len(peer2.blocks)) for h, block1 := range peer1.blocks { block2 := peer2.blocks[h] @@ -123,26 +125,32 @@ func TestBlockPoolUpdatePeer(t *testing.T) { { name: "add a first short peer", pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), - args: testPeer{"P1", 50}, + args: testPeer{"P1", 0, 50}, errWanted: errPeerTooShort, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), }, { name: "add a first good peer", pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), - args: testPeer{"P1", 101}, + args: testPeer{"P1", 0, 101}, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 101}}, map[int64]tPBlocks{}), }, + { + name: "add a first good peer with base", + pool: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), + args: testPeer{"P1", 10, 101}, + poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Base: 10, Height: 101}}, map[int64]tPBlocks{}), + }, { name: "increase the height of P1 from 120 to 123", pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), - args: testPeer{"P1", 123}, + args: testPeer{"P1", 0, 123}, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 123}}, map[int64]tPBlocks{}), }, { name: "decrease the height of P1 from 120 to 110", pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 120}}, map[int64]tPBlocks{}), - args: testPeer{"P1", 110}, + args: testPeer{"P1", 0, 110}, errWanted: errPeerLowersItsHeight, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), }, @@ -151,7 +159,7 @@ func TestBlockPoolUpdatePeer(t *testing.T) { pool: makeBlockPool(testBcR, 100, []BpPeer{{ID: "P1", Height: 105}}, map[int64]tPBlocks{ 100: {"P1", true}, 101: {"P1", true}, 102: {"P1", true}}), - args: testPeer{"P1", 102}, 
+ args: testPeer{"P1", 0, 102}, errWanted: errPeerLowersItsHeight, poolWanted: makeBlockPool(testBcR, 100, []BpPeer{}, map[int64]tPBlocks{}), @@ -162,7 +170,7 @@ func TestBlockPoolUpdatePeer(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { pool := tt.pool - err := pool.UpdatePeer(tt.args.id, tt.args.height) + err := pool.UpdatePeer(tt.args.id, tt.args.base, tt.args.height) assert.Equal(t, tt.errWanted, err) assert.Equal(t, tt.poolWanted.blocks, tt.pool.blocks) assertPeerSetsEquivalent(t, tt.poolWanted.peers, tt.pool.peers) @@ -300,20 +308,34 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { testBcR := newTestBcR() tests := []struct { - name string - pool *BlockPool - maxRequestsPerPeer int - expRequests map[int64]bool - expPeerResults []testPeerResult - expnumPendingBlockRequests int + name string + pool *BlockPool + maxRequestsPerPeer int + expRequests map[int64]bool + expRequestsSent int + expPeerResults []testPeerResult }{ { - name: "one peer - send up to maxRequestsPerPeer block requests", - pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), - maxRequestsPerPeer: 2, - expRequests: map[int64]bool{10: true, 11: true}, - expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}}, - expnumPendingBlockRequests: 2, + name: "one peer - send up to maxRequestsPerPeer block requests", + pool: makeBlockPool(testBcR, 10, []BpPeer{{ID: "P1", Height: 100}}, map[int64]tPBlocks{}), + maxRequestsPerPeer: 2, + expRequests: map[int64]bool{10: true, 11: true}, + expRequestsSent: 2, + expPeerResults: []testPeerResult{{id: "P1", numPendingBlockRequests: 2}}, + }, + { + name: "multiple peers - stops at gap between height and base", + pool: makeBlockPool(testBcR, 10, []BpPeer{ + {ID: "P1", Base: 1, Height: 12}, + {ID: "P2", Base: 15, Height: 100}, + }, map[int64]tPBlocks{}), + maxRequestsPerPeer: 10, + expRequests: map[int64]bool{10: true, 11: true, 12: true}, + expRequestsSent: 3, + expPeerResults: 
[]testPeerResult{ + {id: "P1", numPendingBlockRequests: 3}, + {id: "P2", numPendingBlockRequests: 0}, + }, }, { name: "n peers - send n*maxRequestsPerPeer block requests", @@ -324,10 +346,10 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { map[int64]tPBlocks{}), maxRequestsPerPeer: 2, expRequests: map[int64]bool{10: true, 11: true}, + expRequestsSent: 4, expPeerResults: []testPeerResult{ {id: "P1", numPendingBlockRequests: 2}, {id: "P2", numPendingBlockRequests: 2}}, - expnumPendingBlockRequests: 4, }, } @@ -339,15 +361,13 @@ func TestBlockPoolSendRequestBatch(t *testing.T) { var pool = tt.pool maxRequestsPerPeer = tt.maxRequestsPerPeer pool.MakeNextRequests(10) - assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) + assert.Equal(t, tt.expRequestsSent, testResults.numRequestsSent) for _, tPeer := range tt.expPeerResults { var peer = pool.peers[tPeer.id] assert.NotNil(t, peer) assert.Equal(t, tPeer.numPendingBlockRequests, peer.NumPendingBlockRequests) } - assert.Equal(t, testResults.numRequestsSent, maxRequestsPerPeer*len(pool.peers)) - }) } } diff --git a/blockchain/v1/reactor.go b/blockchain/v1/reactor.go index 1aba26b35..d716159bd 100644 --- a/blockchain/v1/reactor.go +++ b/blockchain/v1/reactor.go @@ -7,6 +7,7 @@ import ( "time" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/behaviour" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -168,7 +169,10 @@ func (bcR *BlockchainReactor) GetChannels() []*p2p.ChannelDescriptor { // AddPeer implements Reactor by sending our state to peer. func (bcR *BlockchainReactor) AddPeer(peer p2p.Peer) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) peer.Send(BlockchainChannel, msgBytes) // it's OK if send fails. 
will try later in poolRoutine @@ -195,7 +199,10 @@ func (bcR *BlockchainReactor) sendBlockToPeer(msg *bcBlockRequestMessage, } func (bcR *BlockchainReactor) sendStatusResponseToPeer(msg *bcStatusRequestMessage, src p2p.Peer) (queued bool) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusResponseMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) return src.TrySend(BlockchainChannel, msgBytes) } @@ -420,7 +427,7 @@ func (bcR *BlockchainReactor) processBlock() error { // NOTE: we can probably make this more efficient, but note that calling // first.Hash() doesn't verify the tx contents, so MakePartSet() is // currently necessary. - err = bcR.state.Validators.VerifyCommit(chainID, firstID, first.Height, second.LastCommit) + err = bcR.state.Voters.VerifyCommit(chainID, firstID, first.Height, second.LastCommit) if err != nil { bcR.Logger.Error("error during commit verification", "err", err, "first", first.Height, "second", second.Height) @@ -429,7 +436,7 @@ func (bcR *BlockchainReactor) processBlock() error { bcR.store.SaveBlock(first, firstParts, second.LastCommit) - bcR.state, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first) + bcR.state, _, err = bcR.blockExec.ApplyBlock(bcR.state, firstID, first) if err != nil { panic(fmt.Sprintf("failed to process committed block (%d:%X): %v", first.Height, first.Hash(), err)) } @@ -440,7 +447,10 @@ func (bcR *BlockchainReactor) processBlock() error { // Implements bcRNotifier // sendStatusRequest broadcasts `BlockStore` height. 
func (bcR *BlockchainReactor) sendStatusRequest() { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{bcR.store.Height()}) + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: bcR.store.Base(), + Height: bcR.store.Height(), + }) bcR.Switch.Broadcast(BlockchainChannel, msgBytes) } @@ -589,6 +599,7 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. @@ -596,17 +607,24 @@ func (m *bcStatusRequestMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } + if m.Base < 0 { + return errors.New("negative Base") + } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
@@ -614,9 +632,15 @@ func (m *bcStatusResponseMessage) ValidateBasic() error { if m.Height < 0 { return errors.New("negative Height") } + if m.Base < 0 { + return errors.New("negative Base") + } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } diff --git a/blockchain/v1/reactor_fsm.go b/blockchain/v1/reactor_fsm.go index 8d3a363ae..0f65f9d66 100644 --- a/blockchain/v1/reactor_fsm.go +++ b/blockchain/v1/reactor_fsm.go @@ -58,6 +58,7 @@ func NewFSM(height int64, toBcR bcReactor) *BcReactorFSM { type bReactorEventData struct { peerID p2p.ID err error // for peer error: timeout, slow; for processed block event if error occurred + base int64 // for status response height int64 // for status response; for processed block event block *types.Block // for block response stateName string // for state timeout events @@ -89,7 +90,7 @@ func (msg *bcReactorMessage) String() string { case startFSMEv: dataStr = "" case statusResponseEv: - dataStr = fmt.Sprintf("peer=%v height=%v", msg.data.peerID, msg.data.height) + dataStr = fmt.Sprintf("peer=%v base=%v height=%v", msg.data.peerID, msg.data.base, msg.data.height) case blockResponseEv: dataStr = fmt.Sprintf("peer=%v block.height=%v length=%v", msg.data.peerID, msg.data.block.Height, msg.data.length) @@ -213,7 +214,7 @@ func init() { return finished, errNoTallerPeer case statusResponseEv: - if err := fsm.pool.UpdatePeer(data.peerID, data.height); err != nil { + if err := fsm.pool.UpdatePeer(data.peerID, data.base, data.height); err != nil { if fsm.pool.NumPeers() == 0 { return waitForPeer, err } @@ -246,7 +247,7 @@ func init() { switch ev { case statusResponseEv: - err := fsm.pool.UpdatePeer(data.peerID, data.height) + err := fsm.pool.UpdatePeer(data.peerID, data.base, 
data.height) if fsm.pool.NumPeers() == 0 { return waitForPeer, err } diff --git a/blockchain/v1/reactor_fsm_test.go b/blockchain/v1/reactor_fsm_test.go index f51defb51..5980ceb08 100644 --- a/blockchain/v1/reactor_fsm_test.go +++ b/blockchain/v1/reactor_fsm_test.go @@ -6,6 +6,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" diff --git a/blockchain/v1/reactor_test.go b/blockchain/v1/reactor_test.go index 2d1f5f130..a402dfb2d 100644 --- a/blockchain/v1/reactor_test.go +++ b/blockchain/v1/reactor_test.go @@ -10,6 +10,10 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -20,7 +24,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) var config *cfg.Config @@ -32,7 +35,7 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G val, privVal := types.RandValidator(randPower, minPower) validators[i] = types.GenesisValidator{ PubKey: val.PubKey, - Power: val.VotingPower, + Power: val.StakingPower, } privValidators[i] = privVal } @@ -46,15 +49,19 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G } func makeVote( + t *testing.T, header *types.Header, blockID types.BlockID, valset *types.ValidatorSet, privVal types.PrivValidator) *types.Vote { - addr := privVal.GetPubKey().Address() - idx, _ := valset.GetByAddress(addr) + + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + + valIdx, _ := valset.GetByAddress(pubKey.Address()) vote 
:= &types.Vote{ - ValidatorAddress: addr, - ValidatorIndex: idx, + ValidatorAddress: pubKey.Address(), + ValidatorIndex: valIdx, Height: header.Height, Round: 1, Timestamp: tmtime.Now(), @@ -73,6 +80,7 @@ type BlockchainReactorPair struct { } func newBlockchainReactor( + t *testing.T, logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, @@ -114,7 +122,7 @@ func newBlockchainReactor( lastBlockMeta := blockStore.LoadBlockMeta(blockHeight - 1) lastBlock := blockStore.LoadBlock(blockHeight - 1) - vote := makeVote(&lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]) + vote := makeVote(t, &lastBlock.Header, lastBlockMeta.BlockID, state.Validators, privVals[0]) lastCommit = types.NewCommit(vote.Height, vote.Round, lastBlockMeta.BlockID, []types.CommitSig{vote.CommitSig()}) } @@ -123,7 +131,7 @@ func newBlockchainReactor( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } @@ -138,6 +146,7 @@ func newBlockchainReactor( } func newBlockchainReactorPair( + t *testing.T, logger log.Logger, genDoc *types.GenesisDoc, privVals []types.PrivValidator, @@ -147,7 +156,7 @@ func newBlockchainReactorPair( consensusReactor.BaseReactor = *p2p.NewBaseReactor("Consensus reactor", consensusReactor) return BlockchainReactorPair{ - newBlockchainReactor(logger, genDoc, privVals, maxBlockHeight), + newBlockchainReactor(t, logger, genDoc, privVals, maxBlockHeight), consensusReactor} } @@ -164,7 +173,6 @@ func (conR *consensusReactorTest) SwitchToConsensus(state sm.State, blocksSynced } func TestFastSyncNoBlockResponse(t *testing.T) { - config = cfg.ResetTestRoot("blockchain_new_reactor_test") defer os.RemoveAll(config.RootDir) genDoc, privVals := randGenesisDoc(1, false, 30) 
@@ -174,8 +182,8 @@ func TestFastSyncNoBlockResponse(t *testing.T) { reactorPairs := make([]BlockchainReactorPair, 2) logger := log.TestingLogger() - reactorPairs[0] = newBlockchainReactorPair(logger, genDoc, privVals, maxBlockHeight) - reactorPairs[1] = newBlockchainReactorPair(logger, genDoc, privVals, 0) + reactorPairs[0] = newBlockchainReactorPair(t, logger, genDoc, privVals, maxBlockHeight) + reactorPairs[1] = newBlockchainReactorPair(t, logger, genDoc, privVals, 0) p2p.MakeConnectedSwitches(config.P2P, 2, func(i int, s *p2p.Switch) *p2p.Switch { s.AddReactor("BLOCKCHAIN", reactorPairs[i].bcR) @@ -239,7 +247,7 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) { defer os.RemoveAll(config.RootDir) genDoc, privVals := randGenesisDoc(1, false, 30) - otherChain := newBlockchainReactorPair(log.TestingLogger(), genDoc, privVals, maxBlockHeight) + otherChain := newBlockchainReactorPair(t, log.TestingLogger(), genDoc, privVals, maxBlockHeight) defer func() { _ = otherChain.bcR.Stop() _ = otherChain.conR.Stop() @@ -254,7 +262,7 @@ func TestFastSyncBadBlockStopsPeer(t *testing.T) { if i == 0 { height = maxBlockHeight } - reactorPairs[i] = newBlockchainReactorPair(logger[i], genDoc, privVals, height) + reactorPairs[i] = newBlockchainReactorPair(t, logger[i], genDoc, privVals, height) } switches := p2p.MakeConnectedSwitches(config.P2P, numNodes, func(i int, s *p2p.Switch) *p2p.Switch { @@ -296,7 +304,7 @@ outerFor: reactorPairs[numNodes-1].bcR.store = otherChain.bcR.store lastLogger := log.TestingLogger() - lastReactorPair := newBlockchainReactorPair(lastLogger, genDoc, privVals, 0) + lastReactorPair := newBlockchainReactorPair(t, lastLogger, genDoc, privVals, 0) reactorPairs = append(reactorPairs, lastReactorPair) switches = append(switches, p2p.MakeConnectedSwitches(config.P2P, 1, func(i int, s *p2p.Switch) *p2p.Switch { @@ -423,7 +431,7 @@ func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCo message := state.MakeHashMessage(0) proof, _ := 
privVal.GenerateVRFProof(message) block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) return block } diff --git a/blockchain/v2/codec.go b/blockchain/v2/codec.go index f970d115f..4e92846c4 100644 --- a/blockchain/v2/codec.go +++ b/blockchain/v2/codec.go @@ -2,6 +2,7 @@ package v2 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/blockchain/v2/io.go b/blockchain/v2/io.go index 3db48c8c0..32cf3aeaf 100644 --- a/blockchain/v2/io.go +++ b/blockchain/v2/io.go @@ -14,7 +14,7 @@ type iIO interface { sendBlockNotFound(height int64, peerID p2p.ID) error sendStatusResponse(height int64, peerID p2p.ID) error - broadcastStatusRequest(height int64) + broadcastStatusRequest(base int64, height int64) trySwitchToConsensus(state state.State, blocksSynced int) } @@ -104,8 +104,14 @@ func (sio *switchIO) trySwitchToConsensus(state state.State, blocksSynced int) { } } -func (sio *switchIO) broadcastStatusRequest(height int64) { - msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{height}) +func (sio *switchIO) broadcastStatusRequest(base int64, height int64) { + if height == 0 && base > 0 { + base = 0 + } + msgBytes := cdc.MustMarshalBinaryBare(&bcStatusRequestMessage{ + Base: base, + Height: height, + }) // XXX: maybe we should use an io specific peer list here sio.sw.Broadcast(BlockchainChannel, msgBytes) } diff --git a/blockchain/v2/processor_context.go b/blockchain/v2/processor_context.go index 7e96a3a69..b693ceb6e 100644 --- a/blockchain/v2/processor_context.go +++ b/blockchain/v2/processor_context.go @@ -29,7 +29,7 @@ func newProcessorContext(st blockStore, ex blockApplier, s state.State) *pContex } func (pc *pContext) applyBlock(blockID types.BlockID, block *types.Block) error { - newState, err := 
pc.applier.ApplyBlock(pc.state, blockID, block) + newState, _, err := pc.applier.ApplyBlock(pc.state, blockID, block) pc.state = newState return err } @@ -39,7 +39,7 @@ func (pc pContext) tmState() state.State { } func (pc pContext) verifyCommit(chainID string, blockID types.BlockID, height int64, commit *types.Commit) error { - return pc.state.Validators.VerifyCommit(chainID, blockID, height, commit) + return pc.state.Voters.VerifyCommit(chainID, blockID, height, commit) } func (pc *pContext) saveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { diff --git a/blockchain/v2/processor_test.go b/blockchain/v2/processor_test.go index fc35c4c72..6bc36b2d3 100644 --- a/blockchain/v2/processor_test.go +++ b/blockchain/v2/processor_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" tmState "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" diff --git a/blockchain/v2/reactor.go b/blockchain/v2/reactor.go index 767e59819..ff89ee94c 100644 --- a/blockchain/v2/reactor.go +++ b/blockchain/v2/reactor.go @@ -7,6 +7,7 @@ import ( "time" "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/behaviour" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" @@ -71,41 +72,56 @@ func (m *bcBlockResponseMessage) String() string { type bcStatusRequestMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. 
func (m *bcStatusRequestMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusRequestMessage) String() string { - return fmt.Sprintf("[bcStatusRequestMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusRequestMessage %v:%v]", m.Base, m.Height) } //------------------------------------- type bcStatusResponseMessage struct { Height int64 + Base int64 } // ValidateBasic performs basic validation. func (m *bcStatusResponseMessage) ValidateBasic() error { + if m.Base < 0 { + return errors.New("negative Base") + } if m.Height < 0 { return errors.New("negative Height") } + if m.Base > m.Height { + return fmt.Errorf("base %v cannot be greater than height %v", m.Base, m.Height) + } return nil } func (m *bcStatusResponseMessage) String() string { - return fmt.Sprintf("[bcStatusResponseMessage %v]", m.Height) + return fmt.Sprintf("[bcStatusResponseMessage %v:%v]", m.Base, m.Height) } type blockStore interface { LoadBlock(height int64) *types.Block SaveBlock(*types.Block, *types.PartSet, *types.Commit) + Base() int64 Height() int64 } @@ -135,7 +151,7 @@ type blockVerifier interface { //nolint:deadcode type blockApplier interface { - ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, error) + ApplyBlock(state state.State, blockID types.BlockID, block *types.Block) (state.State, int64, error) } // XXX: unify naming in this package around tmState @@ -171,12 +187,12 @@ func NewBlockchainReactor( // SetSwitch implements Reactor interface. 
func (r *BlockchainReactor) SetSwitch(sw *p2p.Switch) { - if sw == nil { - panic("set nil switch") - } - r.Switch = sw - r.io = newSwitchIo(sw) + if sw != nil { + r.io = newSwitchIo(sw) + } else { + r.io = nil + } } func (r *BlockchainReactor) setMaxPeerHeight(height int64) { @@ -265,6 +281,7 @@ type bcStatusResponse struct { priorityNormal time time.Time peerID p2p.ID + base int64 height int64 } @@ -336,7 +353,7 @@ func (r *BlockchainReactor) demux() { case <-doProcessBlockCh: r.processor.send(rProcessBlock{}) case <-doStatusCh: - r.io.broadcastStatusRequest(r.SyncHeight()) + r.io.broadcastStatusRequest(r.store.Base(), r.SyncHeight()) // Events from peers case event := <-r.events: @@ -482,7 +499,7 @@ func (r *BlockchainReactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { } case *bcStatusResponseMessage: - r.events <- bcStatusResponse{peerID: src.ID(), height: msg.Height} + r.events <- bcStatusResponse{peerID: src.ID(), base: msg.Base, height: msg.Height} case *bcBlockResponseMessage: r.events <- bcBlockResponse{ diff --git a/blockchain/v2/reactor_test.go b/blockchain/v2/reactor_test.go index 081fcb4a5..636368b88 100644 --- a/blockchain/v2/reactor_test.go +++ b/blockchain/v2/reactor_test.go @@ -10,6 +10,8 @@ import ( "github.com/pkg/errors" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/behaviour" cfg "github.com/tendermint/tendermint/config" @@ -23,7 +25,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) type mockPeer struct { @@ -76,9 +77,11 @@ type mockBlockApplier struct { } // XXX: Add whitelist/blacklist? 
-func (mba *mockBlockApplier) ApplyBlock(state sm.State, blockID types.BlockID, block *types.Block) (sm.State, error) { +func (mba *mockBlockApplier) ApplyBlock( + state sm.State, blockID types.BlockID, block *types.Block, +) (sm.State, int64, error) { state.LastBlockHeight++ - return state, nil + return state, 0, nil } type mockSwitchIo struct { @@ -127,7 +130,7 @@ func (sio *mockSwitchIo) hasSwitchedToConsensus() bool { return sio.switchedToConsensus } -func (sio *mockSwitchIo) broadcastStatusRequest(height int64) { +func (sio *mockSwitchIo) broadcastStatusRequest(base int64, height int64) { } type testReactorParams struct { @@ -347,7 +350,6 @@ func TestReactorHelperMode(t *testing.T) { var ( channelID = byte(0x40) ) - config := cfg.ResetTestRoot("blockchain_reactor_v2_test") defer os.RemoveAll(config.RootDir) genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) @@ -415,6 +417,22 @@ func TestReactorHelperMode(t *testing.T) { } } +func TestReactorSetSwitchNil(t *testing.T) { + config := cfg.ResetTestRoot("blockchain_reactor_v2_test") + defer os.RemoveAll(config.RootDir) + genDoc, privVals := randGenesisDoc(config.ChainID(), 1, false, 30) + + reactor := newTestReactor(testReactorParams{ + logger: log.TestingLogger(), + genDoc: genDoc, + privVals: privVals, + }) + reactor.SetSwitch(nil) + + assert.Nil(t, reactor.Switch) + assert.Nil(t, reactor.io) +} + //---------------------------------------------- // utility funcs @@ -428,7 +446,7 @@ func makeTxs(height int64) (txs []types.Tx) { func makeBlock(privVal types.PrivValidator, height int64, state sm.State, lastCommit *types.Commit) *types.Block { message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, proposerAddr, 0, proof) 
return block } @@ -445,7 +463,7 @@ func randGenesisDoc(chainID string, numValidators int, randPower bool, minPower val, privVal := types.RandValidator(randPower, minPower) validators[i] = types.GenesisValidator{ PubKey: val.PubKey, - Power: val.VotingPower, + Power: val.StakingPower, } privValidators[i] = privVal } @@ -514,7 +532,7 @@ func newReactorStore( thisParts := thisBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: thisBlock.Hash(), PartsHeader: thisParts.Header()} - state, err = blockExec.ApplyBlock(state, blockID, thisBlock) + state, _, err = blockExec.ApplyBlock(state, blockID, thisBlock) if err != nil { panic(errors.Wrap(err, "error apply block")) } diff --git a/blockchain/v2/routine.go b/blockchain/v2/routine.go index 1a883c3c4..ff12bfebc 100644 --- a/blockchain/v2/routine.go +++ b/blockchain/v2/routine.go @@ -5,6 +5,7 @@ import ( "sync/atomic" "github.com/Workiva/go-datastructures/queue" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/blockchain/v2/scheduler.go b/blockchain/v2/scheduler.go index 3cf0b2468..803955b22 100644 --- a/blockchain/v2/scheduler.go +++ b/blockchain/v2/scheduler.go @@ -111,20 +111,22 @@ type scPeer struct { // updated to Removed when peer is removed state peerState + base int64 // updated when statusResponse is received height int64 // updated when statusResponse is received lastTouched time.Time lastRate int64 // last receive rate in bytes } func (p scPeer) String() string { - return fmt.Sprintf("{state %v, height %d, lastTouched %v, lastRate %d, id %v}", - p.state, p.height, p.lastTouched, p.lastRate, p.peerID) + return fmt.Sprintf("{state %v, base %d, height %d, lastTouched %v, lastRate %d, id %v}", + p.state, p.base, p.height, p.lastTouched, p.lastRate, p.peerID) } func newScPeer(peerID p2p.ID) *scPeer { return &scPeer{ peerID: peerID, state: peerStateNew, + base: -1, height: -1, lastTouched: time.Time{}, } @@ -280,7 +282,7 @@ func (sc *scheduler) addNewBlocks() { } } -func (sc 
*scheduler) setPeerHeight(peerID p2p.ID, height int64) error { +func (sc *scheduler) setPeerRange(peerID p2p.ID, base int64, height int64) error { peer, ok := sc.peers[peerID] if !ok { return fmt.Errorf("cannot find peer %s", peerID) @@ -295,6 +297,11 @@ func (sc *scheduler) setPeerHeight(peerID p2p.ID, height int64) error { return fmt.Errorf("cannot move peer height lower. from %d to %d", peer.height, height) } + if base > height { + return fmt.Errorf("cannot set peer base higher than its height") + } + + peer.base = base peer.height = height peer.state = peerStateReady @@ -312,13 +319,13 @@ func (sc *scheduler) getStateAtHeight(height int64) blockState { } } -func (sc *scheduler) getPeersAtHeightOrAbove(height int64) []p2p.ID { +func (sc *scheduler) getPeersWithHeight(height int64) []p2p.ID { peers := make([]p2p.ID, 0) for _, peer := range sc.peers { if peer.state != peerStateReady { continue } - if peer.height >= height { + if peer.base <= height && peer.height >= height { peers = append(peers, peer.peerID) } } @@ -395,6 +402,11 @@ func (sc *scheduler) markPending(peerID p2p.ID, height int64, time time.Time) er height, peerID, peer.height) } + if height < peer.base { + return fmt.Errorf("cannot request height %d for peer %s with base %d", + height, peerID, peer.base) + } + sc.setStateAtHeight(height, blockStatePending) sc.pendingBlocks[height] = peerID sc.pendingTime[height] = time @@ -463,7 +475,7 @@ func (sc *scheduler) pendingFrom(peerID p2p.ID) []int64 { } func (sc *scheduler) selectPeer(height int64) (p2p.ID, error) { - peers := sc.getPeersAtHeightOrAbove(height) + peers := sc.getPeersWithHeight(height) if len(peers) == 0 { return "", fmt.Errorf("cannot find peer for height %d", height) } @@ -535,8 +547,8 @@ func (sc *scheduler) handleNoBlockResponse(event bcNoBlockResponse) (Event, erro _ = sc.removePeer(event.peerID) return scPeerError{peerID: event.peerID, - reason: fmt.Errorf("peer %v with height %d claims no block for %d", - event.peerID, peer.height, 
event.height)}, nil + reason: fmt.Errorf("peer %v with base %d height %d claims no block for %d", + event.peerID, peer.base, peer.height, event.height)}, nil } func (sc *scheduler) handleBlockProcessed(event pcBlockProcessed) (Event, error) { @@ -653,7 +665,7 @@ func (sc *scheduler) handleTrySchedule(event rTrySchedule) (Event, error) { } func (sc *scheduler) handleStatusResponse(event bcStatusResponse) (Event, error) { - err := sc.setPeerHeight(event.peerID, event.height) + err := sc.setPeerRange(event.peerID, event.base, event.height) if err != nil { return scPeerError{peerID: event.peerID, reason: err}, nil } diff --git a/blockchain/v2/scheduler_test.go b/blockchain/v2/scheduler_test.go index 445ba51a7..4ec81e123 100644 --- a/blockchain/v2/scheduler_test.go +++ b/blockchain/v2/scheduler_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" ) @@ -144,8 +145,8 @@ func TestScMaxHeights(t *testing.T) { sc: scheduler{ height: 1, peers: map[p2p.ID]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: -1, state: peerStateNew}}, + "P1": {base: -1, height: -1, state: peerStateNew}, + "P2": {base: -1, height: -1, state: peerStateNew}}, }, wantMax: 0, }, @@ -193,15 +194,15 @@ func TestScAddPeer(t *testing.T) { name: "add first peer", fields: scTestParams{}, args: args{peerID: "P1"}, - wantFields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, + wantFields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, }, { name: "add second peer", - fields: scTestParams{peers: map[string]*scPeer{"P1": {height: -1, state: peerStateNew}}}, + fields: scTestParams{peers: map[string]*scPeer{"P1": {base: -1, height: -1, state: peerStateNew}}}, args: args{peerID: "P2"}, wantFields: scTestParams{peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}, - "P2": {height: -1, state: 
peerStateNew}}}, + "P1": {base: -1, height: -1, state: peerStateNew}, + "P2": {base: -1, height: -1, state: peerStateNew}}}, }, { name: "attempt to add duplicate peer", @@ -500,10 +501,11 @@ func TestScRemovePeer(t *testing.T) { } } -func TestScSetPeerHeight(t *testing.T) { +func TestScSetPeerRange(t *testing.T) { type args struct { peerID p2p.ID + base int64 height int64 } tests := []struct { @@ -575,13 +577,37 @@ func TestScSetPeerHeight(t *testing.T) { peers: map[string]*scPeer{"P2": {height: 10000000000, state: peerStateReady}}, allB: []int64{1, 2, 3, 4}}, }, + { + name: "add peer with base > height should error", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + args: args{peerID: "P1", base: 6, height: 5}, + wantFields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + wantErr: true, + }, + { + name: "add peer with base == height is fine", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {height: 4, state: peerStateNew}}, + targetPending: 4, + }, + args: args{peerID: "P1", base: 6, height: 6}, + wantFields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 6, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}}, + }, } for _, tt := range tests { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - if err := sc.setPeerHeight(tt.args.peerID, tt.args.height); (err != nil) != tt.wantErr { + err := sc.setPeerRange(tt.args.peerID, tt.args.base, tt.args.height) + if (err != nil) != tt.wantErr { t.Errorf("setPeerHeight() wantErr %v, error = %v", tt.wantErr, err) } wantSc := newTestScheduler(tt.wantFields) @@ -590,7 +616,7 @@ func TestScSetPeerHeight(t *testing.T) { } } -func TestScGetPeersAtHeight(t *testing.T) { +func TestScGetPeersWithHeight(t *testing.T) { type args struct { height int64 @@ -647,6 +673,26 @@ func TestScGetPeersAtHeight(t *testing.T) 
{ args: args{height: 4}, wantResult: []p2p.ID{"P1"}, }, + { + name: "one Ready higher peer at base", + fields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 4, height: 20, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, + }, + args: args{height: 4}, + wantResult: []p2p.ID{"P1"}, + }, + { + name: "one Ready higher peer with higher base", + fields: scTestParams{ + targetPending: 4, + peers: map[string]*scPeer{"P1": {base: 10, height: 20, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4}, + }, + args: args{height: 4}, + wantResult: []p2p.ID{}, + }, { name: "multiple mixed peers", fields: scTestParams{ @@ -668,9 +714,9 @@ func TestScGetPeersAtHeight(t *testing.T) { tt := tt t.Run(tt.name, func(t *testing.T) { sc := newTestScheduler(tt.fields) - // getPeersAtHeight should not mutate the scheduler + // getPeersWithHeight should not mutate the scheduler wantSc := sc - res := sc.getPeersAtHeightOrAbove(tt.args.height) + res := sc.getPeersWithHeight(tt.args.height) sort.Sort(PeerByID(res)) assert.Equal(t, tt.wantResult, res) assert.Equal(t, wantSc, sc) @@ -694,7 +740,7 @@ func TestScMarkPending(t *testing.T) { wantErr bool }{ { - name: "attempt mark pending an unknown block", + name: "attempt mark pending an unknown block above height", fields: scTestParams{ peers: map[string]*scPeer{"P1": {height: 2, state: peerStateReady}}, allB: []int64{1, 2}}, @@ -704,6 +750,17 @@ func TestScMarkPending(t *testing.T) { allB: []int64{1, 2}}, wantErr: true, }, + { + name: "attempt mark pending an unknown block below base", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}}, + args: args{peerID: "P1", height: 3, tm: now}, + wantFields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}}, + wantErr: true, + }, { name: "attempt mark pending from non existing peer", fields: scTestParams{ @@ 
-1201,6 +1258,16 @@ func TestScSelectPeer(t *testing.T) { args: args{height: 4}, wantResult: "P1", }, + { + name: "one Ready higher peer with higher base", + fields: scTestParams{ + peers: map[string]*scPeer{"P1": {base: 4, height: 6, state: peerStateReady}}, + allB: []int64{1, 2, 3, 4, 5, 6}, + }, + args: args{height: 3}, + wantResult: "", + wantError: true, + }, { name: "many Ready higher peers with different number of pending requests", fields: scTestParams{ @@ -1989,7 +2056,7 @@ func TestScHandle(t *testing.T) { args: args{event: bcAddNewPeer{peerID: "P1"}}, wantEvent: noOpEvent{}, wantSc: &scTestParams{startTime: now, peers: map[string]*scPeer{ - "P1": {height: -1, state: peerStateNew}}, height: 1}, + "P1": {base: -1, height: -1, state: peerStateNew}}, height: 1}, }, { // set height of P1 args: args{event: bcStatusResponse{peerID: "P1", time: tick[0], height: 3}}, diff --git a/cmd/contract_tests/main.go b/cmd/contract_tests/main.go index 487537824..727828ce7 100644 --- a/cmd/contract_tests/main.go +++ b/cmd/contract_tests/main.go @@ -1,7 +1,9 @@ package main import ( + "encoding/json" "fmt" + "github.com/tendermint/tendermint/cmd/contract_tests/unmarshaler" "strings" "github.com/snikch/goodman/hooks" @@ -16,19 +18,60 @@ func main() { fmt.Println(t[0].Name) }) h.BeforeEach(func(t *transaction.Transaction) { - if strings.HasPrefix(t.Name, "Tx") || + if t.Expected.StatusCode != "200" { + t.Skip = true + } else if strings.HasPrefix(t.Name, "Tx") || // We need a proper example of evidence to broadcast - strings.HasPrefix(t.Name, "Info > /broadcast_evidence") || + strings.HasPrefix(t.Name, "/broadcast_evidence >") || // We need a proper example of path and data - strings.HasPrefix(t.Name, "ABCI > /abci_query") || + strings.HasPrefix(t.Name, "/abci_query >") || // We need to find a way to make a transaction before starting the tests, // that hash should replace the dummy one in hte swagger file - strings.HasPrefix(t.Name, "Info > /tx") { + 
strings.HasPrefix(t.Name, "/tx >") { t.Skip = true - fmt.Printf("%s Has been skipped\n", t.Name) } }) + + // TODO This check need to remove if dredd is updated to check optional + // dredd can not validate optional items + h.Before("/genesis > Get Genesis > 200 > application/json", func(t *transaction.Transaction) { + removeOptionalFieldsOfExpected(t, []string{"result.genesis.app_state"}) + }) + h.Before("/broadcast_tx_async > Returns right away, with no response. "+ + "Does not wait for CheckTx nor DeliverTx results. > 200 > application/json", func(t *transaction.Transaction) { + removeOptionalFieldsOfExpected(t, []string{"error"}) + }) + h.Before("/broadcast_tx_sync > Returns with the response from CheckTx. "+ + "Does not wait for DeliverTx result. > 200 > application/json", func(t *transaction.Transaction) { + removeOptionalFieldsOfExpected(t, []string{"error"}) + }) + h.Before("/broadcast_tx_commit > Returns with the responses from CheckTx and DeliverTx. "+ + "> 200 > application/json", func(t *transaction.Transaction) { + removeOptionalFieldsOfExpected(t, []string{"error"}) + }) + h.Before("/block_results > Get block results at a specified height > 200 > application/json", + func(t *transaction.Transaction) { + removeOptionalFieldsOfExpected(t, []string{ + "result.txs_results", + "result.begin_block_events", + "result.end_block", + "result.end_block_events", + "result.validator_updates", + "result.consensus_param_updates"}) + }) + server.Serve() defer server.Listener.Close() - fmt.Print("FINE") +} + +func removeOptionalFieldsOfExpected(t *transaction.Transaction, paths []string) { + expected := unmarshaler.UnmarshalJSON(&t.Expected.Body) + for _, path := range paths { + expected.DeleteProperty(strings.Split(path, ".")...) 
+ } + newBody, err := json.Marshal(expected.Body) + if err != nil { + panic(fmt.Sprintf("fail to marshal expected body with %s", err)) + } + t.Expected.Body = string(newBody) } diff --git a/cmd/contract_tests/unmarshaler/unmarshal.go b/cmd/contract_tests/unmarshaler/unmarshal.go new file mode 100644 index 000000000..d336c9009 --- /dev/null +++ b/cmd/contract_tests/unmarshaler/unmarshal.go @@ -0,0 +1,57 @@ +package unmarshaler + +import ( + "encoding/json" + "gopkg.in/yaml.v3" +) + +type UnmarshalledArbitraryObject struct { + Body interface{} +} + +func (obj *UnmarshalledArbitraryObject) GetProperty(keys ...string) interface{} { + body := obj.Body + for _, key := range keys { + body = body.(map[string]interface{})[key] + } + return body +} + +func (obj *UnmarshalledArbitraryObject) SetProperty(keys []string, value interface{}) { + prevKeys := keys[:len(keys)-1] + lastKey := keys[len(keys)-1] + + body := obj.Body + for _, key := range prevKeys { + body = body.(map[string]interface{})[key] + } + body.(map[string]interface{})[lastKey] = value +} + +func (obj *UnmarshalledArbitraryObject) DeleteProperty(keys ...string) { + prevKeys := keys[:len(keys)-1] + lastKey := keys[len(keys)-1] + + body := obj.Body + for _, key := range prevKeys { + body = body.(map[string]interface{})[key] + } + delete(body.(map[string]interface{}), lastKey) +} + +func UnmarshalJSON(str *string) UnmarshalledArbitraryObject { + return UnmarshalledArbitraryObject{unmarshalArbitraryFormat(json.Unmarshal, str)} +} + +func UnmarshalYAML(str *string) UnmarshalledArbitraryObject { + return UnmarshalledArbitraryObject{unmarshalArbitraryFormat(yaml.Unmarshal, str)} +} + +func unmarshalArbitraryFormat(unmarshal func([]byte, interface{}) error, str *string) interface{} { + var body interface{} + err := unmarshal([]byte(*str), &body) + if err != nil { + panic(err) + } + return body +} diff --git a/cmd/contract_tests/unmarshaler/unmarshal_test.go b/cmd/contract_tests/unmarshaler/unmarshal_test.go new file 
mode 100644 index 000000000..82b53e896 --- /dev/null +++ b/cmd/contract_tests/unmarshaler/unmarshal_test.go @@ -0,0 +1,176 @@ +package unmarshaler + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestUnmarshalElementJSON(t *testing.T) { + var unmarshalJSONTests = []struct { + json string + expected interface{} + }{ + { + `"30"`, + "30", + }, + { + "30", + float64(30), + }, + { + "null", + nil, + }, + { + "[]", + []interface{}{}, + }, + { + `["a", "b", "c"]`, + []interface{}{"a", "b", "c"}, + }, + { + "{}", + map[string]interface{}{}, + }, + { + `{"key1":"value1", "key2":"value2"}`, + map[string]interface{}{"key1": "value1", "key2": "value2"}, + }, + { + testJSON1, + map[string]interface{}{ + "key1": float64(119), + "sub1": map[string]interface{}{"key2": "value2", "sub2": map[string]interface{}{"key3": "value3"}}, + "sub3": map[string]interface{}{"key4": "value4", "key5": "value5"}}, + }, + { + testJSON2, + map[string]interface{}{ + "key1": float64(119), + "sub1": []interface{}{map[string]interface{}{ + "key2": "value2", "sub2": map[string]interface{}{"key3": []interface{}{"value2"}}}}, + "sub3": map[string]interface{}{"key4": "value2", "key5": "value2"}}, + }, + } + + for _, tt := range unmarshalJSONTests { + t.Logf("unmarshal json test %s", tt.json) + { + unmarshaledJSON := UnmarshalJSON(&tt.json) + require.Equal(t, tt.expected, unmarshaledJSON.Body) + } + } +} + +func TestUnmarshalElementYAML(t *testing.T) { + var unmarshalJSONTests = []struct { + yaml string + expected interface{} + }{ + { + `"30"`, + "30", + }, + { + "30", + 30, + }, + { + "null", + nil, + }, + { + "[]", + []interface{}{}, + }, + { + `["a", "b", "c"]`, + []interface{}{"a", "b", "c"}, + }, + { + "{}", + map[string]interface{}{}, + }, + { + "key1: value1\nkey2: value2", + map[string]interface{}{"key1": "value1", "key2": "value2"}, + }, + { + testYAML1, + map[string]interface{}{ + "key1": 119, + "sub1": map[string]interface{}{"key2": "value2", "sub2": 
map[string]interface{}{"key3": "value3"}}, + "sub3": map[string]interface{}{"key4": "value4", "key5": "value5"}}, + }, + { + testYAML2, + map[string]interface{}{ + "key1": 119, + "sub1": []interface{}{map[string]interface{}{ + "key2": "value2", "sub2": map[string]interface{}{"key3": []interface{}{"value3"}}}}, + "sub3": map[string]interface{}{"key4": "value4", "key5": "value5"}}, + }, + } + + for _, tt := range unmarshalJSONTests { + t.Logf("unmarshal yaml test %s", tt.yaml) + { + unmarshaledYAML := UnmarshalYAML(&tt.yaml) + require.Equal(t, tt.expected, unmarshaledYAML.Body) + } + } +} + +func TestGetAndSetProperty(t *testing.T) { + testJSON := testJSON1 + unmarshaledJSON := UnmarshalJSON(&testJSON) + require.Equal(t, float64(119), unmarshaledJSON.GetProperty("key1")) + require.Equal(t, "value2", unmarshaledJSON.GetProperty("sub1", "key2")) + require.Equal(t, "value3", unmarshaledJSON.GetProperty("sub1", "sub2", "key3")) + require.Equal(t, "value4", unmarshaledJSON.GetProperty("sub3", "key4")) + require.Equal(t, "value5", unmarshaledJSON.GetProperty("sub3", "key5")) + + unmarshaledJSON.SetProperty([]string{"key1"}, "newValue1") + unmarshaledJSON.SetProperty([]string{"sub1", "sub2", "key3"}, "newValue2") + + require.Equal(t, "newValue1", unmarshaledJSON.GetProperty("key1")) + require.Equal(t, "newValue2", unmarshaledJSON.GetProperty("sub1", "sub2", "key3")) +} + +func TestDeleteProposer(t *testing.T) { + testJSON := testJSON1 + unmarshaledJSON := UnmarshalJSON(&testJSON) + + unmarshaledJSON.DeleteProperty("sub3", "key5") + require.Nil(t, unmarshaledJSON.GetProperty("sub3", "key5")) +} + +const ( + testJSON1 = `{"key1":119, "sub1":{"key2":"value2", "sub2":{"key3":"value3"}}, +"sub3":{"key4":"value4", "key5":"value5"}}` + testJSON2 = `{"key1":119, "sub1":[{"key2":"value2", "sub2":{"key3":["value2"]}}], +"sub3":{"key4":"value2", "key5":"value2"}}` + testYAML1 = ` +key1: 119 +sub1: + key2: value2 + sub2: + key3: value3 +sub3: + key4: value4 + key5: value5 +` + 
testYAML2 = ` +key1: 119 +sub1: + - key2: value2 + sub2: + key3: [value3] +sub3: + key4: value4 + key5: value5 +` +) diff --git a/cmd/tendermint/commands/codec.go b/cmd/tendermint/commands/codec.go index 717f2d21e..041b9e9ce 100644 --- a/cmd/tendermint/commands/codec.go +++ b/cmd/tendermint/commands/codec.go @@ -2,6 +2,7 @@ package commands import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/cmd/tendermint/commands/debug/dump.go b/cmd/tendermint/commands/debug/dump.go index 33cb3e24d..a21d8217e 100644 --- a/cmd/tendermint/commands/debug/dump.go +++ b/cmd/tendermint/commands/debug/dump.go @@ -10,9 +10,10 @@ import ( "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) var dumpCmd = &cobra.Command{ @@ -58,7 +59,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { } } - rpc, err := rpcclient.NewHTTP(nodeRPCAddr, "/websocket") + rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { return errors.Wrap(err, "failed to create new http client") } @@ -78,7 +79,7 @@ func dumpCmdHandler(_ *cobra.Command, args []string) error { return nil } -func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpcclient.HTTP) { +func dumpDebugData(outDir string, conf *cfg.Config, rpc *rpchttp.HTTP) { start := time.Now().UTC() tmpDir, err := ioutil.TempDir(outDir, "tendermint_debug_tmp") diff --git a/cmd/tendermint/commands/debug/kill.go b/cmd/tendermint/commands/debug/kill.go index 8d9df1161..40e298c72 100644 --- a/cmd/tendermint/commands/debug/kill.go +++ b/cmd/tendermint/commands/debug/kill.go @@ -16,7 +16,7 @@ import ( cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/cli" - rpcclient 
"github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) var killCmd = &cobra.Command{ @@ -44,7 +44,7 @@ func killCmdHandler(cmd *cobra.Command, args []string) error { return errors.New("invalid output file") } - rpc, err := rpcclient.NewHTTP(nodeRPCAddr, "/websocket") + rpc, err := rpchttp.New(nodeRPCAddr, "/websocket") if err != nil { return errors.Wrap(err, "failed to create new http client") } diff --git a/cmd/tendermint/commands/debug/util.go b/cmd/tendermint/commands/debug/util.go index b392d23d7..9e5e36a87 100644 --- a/cmd/tendermint/commands/debug/util.go +++ b/cmd/tendermint/commands/debug/util.go @@ -9,13 +9,14 @@ import ( "path/filepath" "github.com/pkg/errors" + cfg "github.com/tendermint/tendermint/config" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ) // dumpStatus gets node status state dump from the Tendermint RPC and writes it // to file. It returns an error upon failure. -func dumpStatus(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpStatus(rpc *rpchttp.HTTP, dir, filename string) error { status, err := rpc.Status() if err != nil { return errors.Wrap(err, "failed to get node status") @@ -26,7 +27,7 @@ func dumpStatus(rpc *rpcclient.HTTP, dir, filename string) error { // dumpNetInfo gets network information state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. -func dumpNetInfo(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpNetInfo(rpc *rpchttp.HTTP, dir, filename string) error { netInfo, err := rpc.NetInfo() if err != nil { return errors.Wrap(err, "failed to get node network information") @@ -37,7 +38,7 @@ func dumpNetInfo(rpc *rpcclient.HTTP, dir, filename string) error { // dumpConsensusState gets consensus state dump from the Tendermint RPC and // writes it to file. It returns an error upon failure. 
-func dumpConsensusState(rpc *rpcclient.HTTP, dir, filename string) error { +func dumpConsensusState(rpc *rpchttp.HTTP, dir, filename string) error { consDump, err := rpc.DumpConsensusState() if err != nil { return errors.Wrap(err, "failed to get node consensus dump") diff --git a/cmd/tendermint/commands/init.go b/cmd/tendermint/commands/init.go index eedf6f2b5..cb93d5d3a 100644 --- a/cmd/tendermint/commands/init.go +++ b/cmd/tendermint/commands/init.go @@ -3,7 +3,9 @@ package commands import ( "fmt" + "github.com/pkg/errors" "github.com/spf13/cobra" + cfg "github.com/tendermint/tendermint/config" tmos "github.com/tendermint/tendermint/libs/os" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -59,11 +61,15 @@ func initFilesWithConfig(config *cfg.Config) error { ChainID: fmt.Sprintf("test-chain-%v", tmrand.Str(6)), GenesisTime: tmtime.Now(), ConsensusParams: types.DefaultConsensusParams(), + VoterParams: types.DefaultVoterParams(), + } + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") } - key := pv.GetPubKey() genDoc.Validators = []types.GenesisValidator{{ - Address: key.Address(), - PubKey: key, + Address: pubKey.Address(), + PubKey: pubKey, Power: 10, }} diff --git a/cmd/tendermint/commands/lite.go b/cmd/tendermint/commands/lite.go index dae72266d..e28daa6d8 100644 --- a/cmd/tendermint/commands/lite.go +++ b/cmd/tendermint/commands/lite.go @@ -18,7 +18,7 @@ import ( lproxy "github.com/tendermint/tendermint/lite2/proxy" lrpc "github.com/tendermint/tendermint/lite2/rpc" dbs "github.com/tendermint/tendermint/lite2/store/db" - rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" rpcserver "github.com/tendermint/tendermint/rpc/lib/server" ) @@ -133,7 +133,7 @@ func runProxy(cmd *cobra.Command, args []string) error { return err } - rpcClient, err := rpcclient.NewHTTP(primaryAddr, "/websocket") + rpcClient, err := 
rpchttp.New(primaryAddr, "/websocket") if err != nil { return errors.Wrapf(err, "http client for %s", primaryAddr) } diff --git a/cmd/tendermint/commands/show_validator.go b/cmd/tendermint/commands/show_validator.go index b0c673373..4b885a5c3 100644 --- a/cmd/tendermint/commands/show_validator.go +++ b/cmd/tendermint/commands/show_validator.go @@ -24,7 +24,13 @@ func showValidator(cmd *cobra.Command, args []string) error { } pv := privval.LoadFilePV(keyFilePath, config.PrivValidatorStateFile()) - bz, err := cdc.MarshalJSON(pv.GetPubKey()) + + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") + } + + bz, err := cdc.MarshalJSON(pubKey) if err != nil { return errors.Wrap(err, "failed to marshal private validator pubkey") } diff --git a/cmd/tendermint/commands/testnet.go b/cmd/tendermint/commands/testnet.go index ddf320622..54bb1363e 100644 --- a/cmd/tendermint/commands/testnet.go +++ b/cmd/tendermint/commands/testnet.go @@ -7,6 +7,7 @@ import ( "path/filepath" "strings" + "github.com/pkg/errors" "github.com/spf13/cobra" "github.com/spf13/viper" @@ -137,11 +138,15 @@ func testnetFiles(cmd *cobra.Command, args []string) error { pvKeyFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorKey) pvStateFile := filepath.Join(nodeDir, config.BaseConfig.PrivValidatorState) - pv := privval.LoadFilePV(pvKeyFile, pvStateFile) + + pubKey, err := pv.GetPubKey() + if err != nil { + return errors.Wrap(err, "can't get pubkey") + } genVals[i] = types.GenesisValidator{ - Address: pv.GetPubKey().Address(), - PubKey: pv.GetPubKey(), + Address: pubKey.Address(), + PubKey: pubKey, Power: 1, Name: nodeDirName, } diff --git a/cmd/tendermint/main.go b/cmd/tendermint/main.go index 0cd4b7b70..615b7e065 100644 --- a/cmd/tendermint/main.go +++ b/cmd/tendermint/main.go @@ -4,11 +4,10 @@ import ( "os" "path/filepath" - "github.com/tendermint/tendermint/libs/cli" - cmd "github.com/tendermint/tendermint/cmd/tendermint/commands" 
"github.com/tendermint/tendermint/cmd/tendermint/commands/debug" cfg "github.com/tendermint/tendermint/config" + "github.com/tendermint/tendermint/libs/cli" nm "github.com/tendermint/tendermint/node" ) @@ -29,6 +28,7 @@ func main() { cmd.GenNodeKeyCmd, cmd.VersionCmd, debug.DebugCmd, + cli.NewCompletionCmd(rootCmd, true), ) // NOTE: diff --git a/config/config.go b/config/config.go index dc3eff7d1..c246493e4 100644 --- a/config/config.go +++ b/config/config.go @@ -734,6 +734,8 @@ func (cfg *FastSyncConfig) ValidateBasic() error { return nil case "v1": return nil + case "v2": + return nil default: return fmt.Errorf("unknown fastsync version %s", cfg.Version) } diff --git a/config/config_test.go b/config/config_test.go index 6da032d07..c83f1c3f5 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -134,27 +134,44 @@ func TestFastSyncConfigValidateBasic(t *testing.T) { assert.Error(t, cfg.ValidateBasic()) } -func TestConsensusConfigValidateBasic(t *testing.T) { - cfg := TestConsensusConfig() - assert.NoError(t, cfg.ValidateBasic()) - - fieldsToTest := []string{ - "TimeoutPropose", - "TimeoutProposeDelta", - "TimeoutPrevote", - "TimeoutPrevoteDelta", - "TimeoutPrecommit", - "TimeoutPrecommitDelta", - "TimeoutCommit", - "CreateEmptyBlocksInterval", - "PeerGossipSleepDuration", - "PeerQueryMaj23SleepDuration", +func TestConsensusConfig_ValidateBasic(t *testing.T) { + // nolint: lll + testcases := map[string]struct { + modify func(*ConsensusConfig) + expectErr bool + }{ + "TimeoutPropose": {func(c *ConsensusConfig) { c.TimeoutPropose = time.Second }, false}, + "TimeoutPropose negative": {func(c *ConsensusConfig) { c.TimeoutPropose = -1 }, true}, + "TimeoutProposeDelta": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = time.Second }, false}, + "TimeoutProposeDelta negative": {func(c *ConsensusConfig) { c.TimeoutProposeDelta = -1 }, true}, + "TimeoutPrevote": {func(c *ConsensusConfig) { c.TimeoutPrevote = time.Second }, false}, + "TimeoutPrevote 
negative": {func(c *ConsensusConfig) { c.TimeoutPrevote = -1 }, true}, + "TimeoutPrevoteDelta": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = time.Second }, false}, + "TimeoutPrevoteDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrevoteDelta = -1 }, true}, + "TimeoutPrecommit": {func(c *ConsensusConfig) { c.TimeoutPrecommit = time.Second }, false}, + "TimeoutPrecommit negative": {func(c *ConsensusConfig) { c.TimeoutPrecommit = -1 }, true}, + "TimeoutPrecommitDelta": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = time.Second }, false}, + "TimeoutPrecommitDelta negative": {func(c *ConsensusConfig) { c.TimeoutPrecommitDelta = -1 }, true}, + "TimeoutCommit": {func(c *ConsensusConfig) { c.TimeoutCommit = time.Second }, false}, + "TimeoutCommit negative": {func(c *ConsensusConfig) { c.TimeoutCommit = -1 }, true}, + "PeerGossipSleepDuration": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = time.Second }, false}, + "PeerGossipSleepDuration negative": {func(c *ConsensusConfig) { c.PeerGossipSleepDuration = -1 }, true}, + "PeerQueryMaj23SleepDuration": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = time.Second }, false}, + "PeerQueryMaj23SleepDuration negative": {func(c *ConsensusConfig) { c.PeerQueryMaj23SleepDuration = -1 }, true}, } - - for _, fieldName := range fieldsToTest { - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(-1) - assert.Error(t, cfg.ValidateBasic()) - reflect.ValueOf(cfg).Elem().FieldByName(fieldName).SetInt(0) + for desc, tc := range testcases { + tc := tc // appease linter + t.Run(desc, func(t *testing.T) { + cfg := DefaultConsensusConfig() + tc.modify(cfg) + + err := cfg.ValidateBasic() + if tc.expectErr { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) } } diff --git a/config/toml.go b/config/toml.go index 609f1487a..3fe4d1aac 100644 --- a/config/toml.go +++ b/config/toml.go @@ -321,6 +321,7 @@ max_tx_bytes = {{ .Mempool.MaxTxBytes }} # Fast Sync version to use: # 1) "v0" 
(default) - the legacy fast sync implementation # 2) "v1" - refactor of v0 version for better testability +# 3) "v2" - refactor of v1 version for better usability version = "{{ .FastSync.Version }}" ##### consensus configuration options ##### diff --git a/consensus/byzantine_test.go b/consensus/byzantine_test.go index a1ddc743a..2cf659fed 100644 --- a/consensus/byzantine_test.go +++ b/consensus/byzantine_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/service" "github.com/tendermint/tendermint/p2p" sm "github.com/tendermint/tendermint/state" @@ -178,8 +179,8 @@ func TestByzantine(t *testing.T) { // find proposer of current height and round from State func findProposer(state *State) (int, *types.Validator) { - proposer := types.SelectProposer(state.Validators, state.state.LastProofHash, state.Height, state.Round) - return state.Validators.GetByAddress(proposer.PubKey.Address()) + proposer := state.Validators.SelectProposer(state.state.LastProofHash, state.Height, state.Round) + return state.Voters.GetByAddress(proposer.PubKey.Address()) } //------------------------------- diff --git a/consensus/codec.go b/consensus/codec.go index 1c5bf93df..ae7dbaab2 100644 --- a/consensus/codec.go +++ b/consensus/codec.go @@ -2,6 +2,7 @@ package consensus import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/consensus/common_test.go b/consensus/common_test.go index d99267704..beeb7642c 100644 --- a/consensus/common_test.go +++ b/consensus/common_test.go @@ -14,10 +14,14 @@ import ( "time" "github.com/go-kit/kit/log/term" + "github.com/pkg/errors" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto/vrf" "path" + dbm "github.com/tendermint/tm-db" + abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/counter" 
"github.com/tendermint/tendermint/abci/example/kvstore" @@ -35,7 +39,6 @@ import ( "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) const ( @@ -49,7 +52,7 @@ type cleanupFunc func() // genesis, chain_id, priv_val var config *cfg.Config // NOTE: must be reset for each _test.go file var consensusReplayConfig *cfg.Config -var ensureTimeout = time.Millisecond * 100 +var ensureTimeout = time.Millisecond * 200 func ensureDir(dir string, mode os.FileMode) { if err := tmos.EnsureDir(dir, mode); err != nil { @@ -84,17 +87,23 @@ func (vs *validatorStub) signVote( voteType types.SignedMsgType, hash []byte, header types.PartSetHeader) (*types.Vote, error) { - addr := vs.PrivValidator.GetPubKey().Address() + + pubKey, err := vs.PrivValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + vote := &types.Vote{ ValidatorIndex: vs.Index, - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), Height: vs.Height, Round: vs.Round, Timestamp: tmtime.Now(), Type: voteType, BlockID: types.BlockID{Hash: hash, PartsHeader: header}, } - err := vs.PrivValidator.SignVote(config.ChainID(), vote) + + err = vs.PrivValidator.SignVote(config.ChainID(), vote) return vote, err } @@ -138,7 +147,15 @@ func (vss ValidatorStubsByAddress) Len() int { } func (vss ValidatorStubsByAddress) Less(i, j int) bool { - return bytes.Compare(vss[i].GetPubKey().Address(), vss[j].GetPubKey().Address()) == -1 + vssi, err := vss[i].GetPubKey() + if err != nil { + panic(err) + } + vssj, err := vss[j].GetPubKey() + if err != nil { + panic(err) + } + return bytes.Compare(vssi.Address(), vssj.Address()) == -1 } func (vss ValidatorStubsByAddress) Swap(i, j int) { @@ -166,8 +183,10 @@ func decideProposal( ) (proposal *types.Proposal, block *types.Block) { oldPrivValidator := cs1.privValidator cs1.mtx.Lock() - if 
!cs1.privValidator.GetPubKey().Equals(vs.PrivValidator.GetPubKey()) { - // block creater must be the cs.privValidator + cs1PubKey, _ := cs1.privValidator.GetPubKey() + vsPubKey, _ := vs.PrivValidator.GetPubKey() + if !cs1PubKey.Equals(vsPubKey) { + // block creator must be the cs.privValidator cs1.privValidator = vs.PrivValidator } block, blockParts := cs1.createProposalBlock(round) @@ -207,7 +226,9 @@ func signAddVotes( func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, blockHash []byte) { prevotes := cs.Votes.Prevotes(round) - address := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + address := pubKey.Address() var vote *types.Vote if vote = prevotes.GetByAddress(address); vote == nil { panic("Failed to find prevote from validator") @@ -225,7 +246,9 @@ func validatePrevote(t *testing.T, cs *State, round int, privVal *validatorStub, func validateLastPrecommit(t *testing.T, cs *State, privVal *validatorStub, blockHash []byte) { votes := cs.LastCommit - address := privVal.GetPubKey().Address() + pv, err := privVal.GetPubKey() + require.NoError(t, err) + address := pv.Address() var vote *types.Vote if vote = votes.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") @@ -245,7 +268,9 @@ func validatePrecommit( lockedBlockHash []byte, ) { precommits := cs.Votes.Precommits(thisRound) - address := privVal.GetPubKey().Address() + pv, err := privVal.GetPubKey() + require.NoError(t, err) + address := pv.Address() var vote *types.Vote if vote = precommits.GetByAddress(address); vote == nil { panic("Failed to find precommit from validator") @@ -385,8 +410,12 @@ func loadPrivValidator(config *cfg.Config) *privval.FilePV { } func randState(nValidators int) (*State, []*validatorStub) { + return randStateWithVoterParams(nValidators, types.DefaultVoterParams()) +} + +func randStateWithVoterParams(nValidators int, voterParams *types.VoterParams) (*State, []*validatorStub) 
{ // Get State - state, privVals := randGenesisState(nValidators, false, 10) + state, privVals := randGenesisState(nValidators, false, 10, voterParams) state.LastProofHash = []byte{2} vss := make([]*validatorStub, nValidators) @@ -408,7 +437,7 @@ func theOthers(index int) int { } func forceProposer(cs *State, vals []*validatorStub, index []int, height []int64, round []int) { - for i := 0; i < 1000; i++ { + for i := 0; i < 5000; i++ { allMatch := true firstHash := []byte{byte(i)} currentHash := firstHash @@ -422,7 +451,8 @@ func forceProposer(cs *State, vals []*validatorStub, index []int, height []int64 curVal = vals[theOthers(index[j])] mustBe = false } - if curVal.GetPubKey().Equals(types.SelectProposer(cs.Validators, currentHash, height[j], round[j]).PubKey) != + curValPubKey, _ := curVal.GetPubKey() + if curValPubKey.Equals(cs.Validators.SelectProposer(currentHash, height[j], round[j]).PubKey) != mustBe { allMatch = false break @@ -671,7 +701,7 @@ func consensusLogger() log.Logger { func randConsensusNet(nValidators int, testName string, tickerFunc func() TimeoutTicker, appFunc func() abci.Application, configOpts ...func(*cfg.Config)) ([]*State, cleanupFunc) { - genDoc, privVals := randGenesisDoc(nValidators, false, 30) + genDoc, privVals := randGenesisDoc(nValidators, false, 30, types.DefaultVoterParams()) css := make([]*State, nValidators) logger := consensusLogger() configRootDirs := make([]string, 0, nValidators) @@ -709,7 +739,7 @@ func randConsensusNetWithPeers( tickerFunc func() TimeoutTicker, appFunc func(string) abci.Application, ) ([]*State, *types.GenesisDoc, *cfg.Config, cleanupFunc) { - genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower) + genDoc, privVals := randGenesisDoc(nValidators, false, testMinPower, types.DefaultVoterParams()) css := make([]*State, nPeers) logger := consensusLogger() var peer0Config *cfg.Config @@ -771,14 +801,19 @@ func getSwitchIndex(switches []*p2p.Switch, peer p2p.Peer) int { 
//------------------------------------------------------------------------------- // genesis -func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.GenesisDoc, []types.PrivValidator) { +func randGenesisDoc( + numValidators int, + randPower bool, + minPower int64, + voterParams *types.VoterParams, +) (*types.GenesisDoc, []types.PrivValidator) { validators := make([]types.GenesisValidator, numValidators) privValidators := make([]types.PrivValidator, numValidators) for i := 0; i < numValidators; i++ { val, privVal := types.RandValidator(randPower, minPower) validators[i] = types.GenesisValidator{ PubKey: val.PubKey, - Power: val.VotingPower, + Power: val.StakingPower, } privValidators[i] = privVal } @@ -788,11 +823,13 @@ func randGenesisDoc(numValidators int, randPower bool, minPower int64) (*types.G GenesisTime: tmtime.Now(), ChainID: config.ChainID(), Validators: validators, + VoterParams: voterParams, }, privValidators } -func randGenesisState(numValidators int, randPower bool, minPower int64) (sm.State, []types.PrivValidator) { - genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower) +func randGenesisState(numValidators int, randPower bool, minPower int64, voterParams *types.VoterParams) ( + sm.State, []types.PrivValidator) { + genDoc, privValidators := randGenesisDoc(numValidators, randPower, minPower, voterParams) s0, _ := sm.MakeGenesisState(genDoc) return s0, privValidators } diff --git a/consensus/mempool_test.go b/consensus/mempool_test.go index c7fdbd578..cbbe995b9 100644 --- a/consensus/mempool_test.go +++ b/consensus/mempool_test.go @@ -9,12 +9,13 @@ import ( "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/code" abci "github.com/tendermint/tendermint/abci/types" mempl "github.com/tendermint/tendermint/mempool" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm 
"github.com/tendermint/tm-db" ) // for testing @@ -26,7 +27,7 @@ func TestMempoolNoProgressUntilTxsAvailable(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10) + state, privVals := randGenesisState(1, false, 10, nil) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round @@ -45,7 +46,7 @@ func TestMempoolProgressAfterCreateEmptyBlocksInterval(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocksInterval = ensureTimeout - state, privVals := randGenesisState(1, false, 10) + state, privVals := randGenesisState(1, false, 10, nil) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round @@ -61,7 +62,7 @@ func TestMempoolProgressInHigherRound(t *testing.T) { config := ResetConfig("consensus_mempool_txs_available_test") defer os.RemoveAll(config.RootDir) config.Consensus.CreateEmptyBlocks = false - state, privVals := randGenesisState(1, false, 10) + state, privVals := randGenesisState(1, false, 10, nil) cs := newStateWithConfig(config, state, privVals[0], NewCounterApplication()) assertMempool(cs.txNotifier).EnableTxsAvailable() height, round := cs.Height, cs.Round @@ -107,7 +108,7 @@ func deliverTxsRange(cs *State, start, end int) { } func TestMempoolTxConcurrentWithCommit(t *testing.T) { - state, privVals := randGenesisState(1, false, 10) + state, privVals := randGenesisState(1, false, 10, nil) blockDB := dbm.NewMemDB() cs := newStateWithConfigAndBlockStore(config, state, privVals[0], NewCounterApplication(), blockDB) sm.SaveState(blockDB, state) @@ -129,7 +130,7 @@ func TestMempoolTxConcurrentWithCommit(t 
*testing.T) { } func TestMempoolRmBadTx(t *testing.T) { - state, privVals := randGenesisState(1, false, 10) + state, privVals := randGenesisState(1, false, 10, nil) app := NewCounterApplication() blockDB := dbm.NewMemDB() cs := newStateWithConfigAndBlockStore(config, state, privVals[0], app, blockDB) diff --git a/consensus/metrics.go b/consensus/metrics.go index 5fa27118a..b4bb7f652 100644 --- a/consensus/metrics.go +++ b/consensus/metrics.go @@ -19,28 +19,29 @@ type Metrics struct { // Height of the chain. Height metrics.Gauge - // ValidatorLastSignedHeight of a validator. - ValidatorLastSignedHeight metrics.Gauge + // VoterLastSignedHeight of a voter. + VoterLastSignedHeight metrics.Gauge // Number of rounds. Rounds metrics.Gauge - // Number of validators. - Validators metrics.Gauge - // Total power of all validators. - ValidatorsPower metrics.Gauge - // Power of a validator. - ValidatorPower metrics.Gauge - // Amount of blocks missed by a validator. - ValidatorMissedBlocks metrics.Gauge - // Number of validators who did not sign. - MissingValidators metrics.Gauge - // Total power of the missing validators. - MissingValidatorsPower metrics.Gauge - // Number of validators who tried to double sign. - ByzantineValidators metrics.Gauge - // Total power of the byzantine validators. - ByzantineValidatorsPower metrics.Gauge + // ValidatorOrVoter: voter + // Number of voters. + Voters metrics.Gauge + // Total power of all voters. + VotersPower metrics.Gauge + // Power of a voter. + VoterPower metrics.Gauge + // Amount of blocks missed by a voter. + VoterMissedBlocks metrics.Gauge + // Number of voters who did not sign. + MissingVoters metrics.Gauge + // Total power of the missing voters. + MissingVotersPower metrics.Gauge + // Number of voters who tried to double sign. + ByzantineVoters metrics.Gauge + // Total power of the byzantine voters. + ByzantineVotersPower metrics.Gauge // Time between this and the last block. 
BlockIntervalSeconds metrics.Gauge @@ -82,59 +83,59 @@ func PrometheusMetrics(namespace string, labelsAndValues ...string) *Metrics { Help: "Number of rounds.", }, labels).With(labelsAndValues...), - Validators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + Voters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validators", - Help: "Number of validators.", + Name: "voters", + Help: "Number of voters.", }, labels).With(labelsAndValues...), - ValidatorLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterLastSignedHeight: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validator_last_signed_height", - Help: "Last signed height for a validator", + Name: "voter_last_signed_height", + Help: "Last signed height for a voter", }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterMissedBlocks: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validator_missed_blocks", - Help: "Total missed blocks for a validator", + Name: "voter_missed_blocks", + Help: "Total missed blocks for a voter", }, append(labels, "validator_address")).With(labelsAndValues...), - ValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validators_power", - Help: "Total power of all validators.", + Name: "voters_power", + Help: "Total power of all voters.", }, labels).With(labelsAndValues...), - ValidatorPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + VoterPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "validator_power", - Help: "Power of a validator", + Name: "voter_power", + Help: "Power of a 
voter", }, append(labels, "validator_address")).With(labelsAndValues...), - MissingValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + MissingVoters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "missing_validators", - Help: "Number of validators who did not sign.", + Name: "missing_voters", + Help: "Number of voters who did not sign.", }, labels).With(labelsAndValues...), - MissingValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + MissingVotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "missing_validators_power", - Help: "Total power of the missing validators.", + Name: "missing_voters_power", + Help: "Total power of the missing voters.", }, labels).With(labelsAndValues...), - ByzantineValidators: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + ByzantineVoters: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "byzantine_validators", - Help: "Number of validators who tried to double sign.", + Name: "byzantine_voters", + Help: "Number of voters who tried to double sign.", }, labels).With(labelsAndValues...), - ByzantineValidatorsPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ + ByzantineVotersPower: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ Namespace: namespace, Subsystem: MetricsSubsystem, - Name: "byzantine_validators_power", - Help: "Total power of the byzantine validators.", + Name: "byzantine_voters_power", + Help: "Total power of the byzantine voters.", }, labels).With(labelsAndValues...), BlockIntervalSeconds: prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{ @@ -188,18 +189,18 @@ func NopMetrics() *Metrics { return &Metrics{ Height: discard.NewGauge(), - ValidatorLastSignedHeight: discard.NewGauge(), + VoterLastSignedHeight: discard.NewGauge(), Rounds: discard.NewGauge(), - Validators: discard.NewGauge(), - ValidatorsPower: 
discard.NewGauge(), - ValidatorPower: discard.NewGauge(), - ValidatorMissedBlocks: discard.NewGauge(), - MissingValidators: discard.NewGauge(), - MissingValidatorsPower: discard.NewGauge(), - ByzantineValidators: discard.NewGauge(), - ByzantineValidatorsPower: discard.NewGauge(), + Voters: discard.NewGauge(), + VotersPower: discard.NewGauge(), + VoterPower: discard.NewGauge(), + VoterMissedBlocks: discard.NewGauge(), + MissingVoters: discard.NewGauge(), + MissingVotersPower: discard.NewGauge(), + ByzantineVoters: discard.NewGauge(), + ByzantineVotersPower: discard.NewGauge(), BlockIntervalSeconds: discard.NewGauge(), diff --git a/consensus/reactor.go b/consensus/reactor.go index 3711dd7cf..63d8d2f20 100644 --- a/consensus/reactor.go +++ b/consensus/reactor.go @@ -9,6 +9,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + cstypes "github.com/tendermint/tendermint/consensus/types" "github.com/tendermint/tendermint/libs/bits" tmevents "github.com/tendermint/tendermint/libs/events" @@ -310,9 +311,9 @@ func (conR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *VoteMessage: cs := conR.conS cs.mtx.RLock() - height, valSize, lastCommitSize := cs.Height, cs.Validators.Size(), cs.LastCommit.Size() + height, voterSize, lastCommitSize := cs.Height, cs.Voters.Size(), cs.LastCommit.Size() cs.mtx.RUnlock() - ps.EnsureVoteBitArrays(height, valSize) + ps.EnsureVoteBitArrays(height, voterSize) ps.EnsureVoteBitArrays(height-1, lastCommitSize) ps.SetHasVote(msg.Vote) @@ -493,8 +494,8 @@ OUTER_LOOP: } } - // If the peer is on a previous height, help catch up. - if (0 < prs.Height) && (prs.Height < rs.Height) { + // If the peer is on a previous height that we have, help catch up. 
+ if (0 < prs.Height) && (prs.Height < rs.Height) && (prs.Height >= conR.conS.blockStore.Base()) { heightLogger := logger.With("height", prs.Height) // if we never received the commit message from the peer, the block parts wont be initialized @@ -502,7 +503,7 @@ OUTER_LOOP: blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { heightLogger.Error("Failed to load block meta", - "blockstoreHeight", conR.conS.blockStore.Height()) + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) } else { ps.InitProposalBlockParts(blockMeta.BlockID.PartsHeader) @@ -566,8 +567,8 @@ func (conR *Reactor) gossipDataForCatchup(logger log.Logger, rs *cstypes.RoundSt // Ensure that the peer's PartSetHeader is correct blockMeta := conR.conS.blockStore.LoadBlockMeta(prs.Height) if blockMeta == nil { - logger.Error("Failed to load block meta", - "ourHeight", rs.Height, "blockstoreHeight", conR.conS.blockStore.Height()) + logger.Error("Failed to load block meta", "ourHeight", rs.Height, + "blockstoreBase", conR.conS.blockStore.Base(), "blockstoreHeight", conR.conS.blockStore.Height()) time.Sleep(conR.conS.config.PeerGossipSleepDuration) return } else if !blockMeta.BlockID.PartsHeader.Equals(prs.ProposalBlockPartsHeader) { @@ -802,15 +803,17 @@ OUTER_LOOP: // Maybe send Height/CatchupCommitRound/CatchupCommit. 
{ prs := ps.GetRoundState() - if prs.CatchupCommitRound != -1 && 0 < prs.Height && prs.Height <= conR.conS.blockStore.Height() { - commit := conR.conS.LoadCommit(prs.Height) - peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ - Height: prs.Height, - Round: commit.Round, - Type: types.PrecommitType, - BlockID: commit.BlockID, - })) - time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + if prs.CatchupCommitRound != -1 && prs.Height > 0 && prs.Height <= conR.conS.blockStore.Height() && + prs.Height >= conR.conS.blockStore.Base() { + if commit := conR.conS.LoadCommit(prs.Height); commit != nil { + peer.TrySend(StateChannel, cdc.MustMarshalBinaryBare(&VoteSetMaj23Message{ + Height: prs.Height, + Round: commit.Round, + Type: types.PrecommitType, + BlockID: commit.BlockID, + })) + time.Sleep(conR.conS.config.PeerQueryMaj23SleepDuration) + } } } @@ -1119,7 +1122,7 @@ func (ps *PeerState) getVoteBitArray(height int64, round int, votesType types.Si } // 'round': A round for which we have a +2/3 commit. -func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValidators int) { +func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numVoters int) { if ps.PRS.Height != height { return } @@ -1143,37 +1146,37 @@ func (ps *PeerState) ensureCatchupCommitRound(height int64, round int, numValida if round == ps.PRS.Round { ps.PRS.CatchupCommit = ps.PRS.Precommits } else { - ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + ps.PRS.CatchupCommit = bits.NewBitArray(numVoters) } } // EnsureVoteBitArrays ensures the bit-arrays have been allocated for tracking // what votes this peer has received. -// NOTE: It's important to make sure that numValidators actually matches -// what the node sees as the number of validators for height. 
-func (ps *PeerState) EnsureVoteBitArrays(height int64, numValidators int) { +// NOTE: It's important to make sure that numVoters actually matches +// what the node sees as the number of voters for height. +func (ps *PeerState) EnsureVoteBitArrays(height int64, numVoters int) { ps.mtx.Lock() defer ps.mtx.Unlock() - ps.ensureVoteBitArrays(height, numValidators) + ps.ensureVoteBitArrays(height, numVoters) } -func (ps *PeerState) ensureVoteBitArrays(height int64, numValidators int) { +func (ps *PeerState) ensureVoteBitArrays(height int64, numVoters int) { if ps.PRS.Height == height { if ps.PRS.Prevotes == nil { - ps.PRS.Prevotes = bits.NewBitArray(numValidators) + ps.PRS.Prevotes = bits.NewBitArray(numVoters) } if ps.PRS.Precommits == nil { - ps.PRS.Precommits = bits.NewBitArray(numValidators) + ps.PRS.Precommits = bits.NewBitArray(numVoters) } if ps.PRS.CatchupCommit == nil { - ps.PRS.CatchupCommit = bits.NewBitArray(numValidators) + ps.PRS.CatchupCommit = bits.NewBitArray(numVoters) } if ps.PRS.ProposalPOL == nil { - ps.PRS.ProposalPOL = bits.NewBitArray(numValidators) + ps.PRS.ProposalPOL = bits.NewBitArray(numVoters) } } else if ps.PRS.Height == height+1 { if ps.PRS.LastCommit == nil { - ps.PRS.LastCommit = bits.NewBitArray(numValidators) + ps.PRS.LastCommit = bits.NewBitArray(numVoters) } } } diff --git a/consensus/reactor_test.go b/consensus/reactor_test.go index 50654c124..732ca79ee 100644 --- a/consensus/reactor_test.go +++ b/consensus/reactor_test.go @@ -14,6 +14,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abcicli "github.com/tendermint/tendermint/abci/client" "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" @@ -29,7 +31,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - dbm 
"github.com/tendermint/tm-db" ) //---------------------------------------------- @@ -119,7 +120,7 @@ func TestReactorWithEvidence(t *testing.T) { // to unroll unwieldy abstractions. Here we duplicate the code from: // css := randConsensusNet(N, "consensus_reactor_test", newMockTickerFunc(true), newCounter) - genDoc, privVals := randGenesisDoc(nValidators, false, 30) + genDoc, privVals := randGenesisDoc(nValidators, false, 30, nil) css := make([]*State, nValidators) logger := consensusLogger() for i := 0; i < nValidators; i++ { @@ -154,8 +155,9 @@ func TestReactorWithEvidence(t *testing.T) { // mock the evidence pool // everyone includes evidence of another double signing vIdx := (i + 1) % nValidators - addr := privVals[vIdx].GetPubKey().Address() - evpool := newMockEvidencePool(addr) + pubKey, err := privVals[vIdx].GetPubKey() + require.NoError(t, err) + evpool := newMockEvidencePool(pubKey.Address()) // Make State blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyAppConnCon, mempool, evpool) @@ -333,7 +335,7 @@ func TestReactorRecordsVotesAndBlockParts(t *testing.T) { //------------------------------------------------------------- // ensure we can make blocks despite cycling a validator set -func TestReactorVotingPowerChange(t *testing.T) { +func TestReactorStakingPowerChange(t *testing.T) { nVals := 4 logger := log.TestingLogger() css, cleanup := randConsensusNet( @@ -348,7 +350,9 @@ func TestReactorVotingPowerChange(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - addr := css[i].privValidator.GetPubKey().Address() + pubKey, err := css[i].privValidator.GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() activeVals[string(addr)] = struct{}{} } @@ -360,51 +364,52 @@ func TestReactorVotingPowerChange(t *testing.T) { //--------------------------------------------------------------------------- logger.Debug("---------------------------- Testing changing the voting power 
of one validator a few times") - val1PubKey := css[0].privValidator.GetPubKey() + val1PubKey, err := css[0].privValidator.GetPubKey() + require.NoError(t, err) val1PubKeyABCI := types.TM2PB.PubKey(val1PubKey) updateValidatorTx := kvstore.MakeValSetChangeTx(val1PubKeyABCI, 25) - previousTotalVotingPower := css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower := css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( - "expected voting power to change (before: %d, after: %d)", + "expected staking power to change (before: %d, after: %d)", previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 2) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower = css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( "expected voting power to change (before: %d, after: %d)", 
previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } updateValidatorTx = kvstore.MakeValSetChangeTx(val1PubKeyABCI, 26) - previousTotalVotingPower = css[0].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower = css[0].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlockWithTx(t, nVals, activeVals, blocksSubs, css, updateValidatorTx) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) waitForAndValidateBlock(t, nVals, activeVals, blocksSubs, css) - if css[0].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[0].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Fatalf( "expected voting power to change (before: %d, after: %d)", previousTotalVotingPower, - css[0].GetRoundState().LastValidators.TotalVotingPower()) + css[0].GetRoundState().LastVoters.TotalVotingPower()) } } @@ -427,8 +432,9 @@ func TestReactorValidatorSetChanges(t *testing.T) { // map of active validators activeVals := make(map[string]struct{}) for i := 0; i < nVals; i++ { - addr := css[i].privValidator.GetPubKey().Address() - activeVals[string(addr)] = struct{}{} + pubKey, err := css[i].privValidator.GetPubKey() + require.NoError(t, err) + activeVals[string(pubKey.Address())] = struct{}{} } // wait till everyone makes block 1 @@ -439,7 +445,8 @@ func TestReactorValidatorSetChanges(t *testing.T) { //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing adding one validator") - newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, 
testMinPower) @@ -466,31 +473,34 @@ func TestReactorValidatorSetChanges(t *testing.T) { //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing changing the voting power of one validator") - updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - previousTotalVotingPower := css[nVals].GetRoundState().LastValidators.TotalVotingPower() + previousTotalVotingPower := css[nVals].GetRoundState().LastVoters.TotalVotingPower() waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1) waitForAndValidateBlockWithTx(t, nPeers, activeVals, blocksSubs, css, updateValidatorTx1) waitForAndValidateBlock(t, nPeers, activeVals, blocksSubs, css) waitForBlockWithUpdatedValsAndValidateIt(t, nPeers, activeVals, blocksSubs, css) - if css[nVals].GetRoundState().LastValidators.TotalVotingPower() == previousTotalVotingPower { + if css[nVals].GetRoundState().LastVoters.TotalVotingPower() == previousTotalVotingPower { t.Errorf( - "expected voting power to change (before: %d, after: %d)", + "expected staking power to change (before: %d, after: %d)", previousTotalVotingPower, - css[nVals].GetRoundState().LastValidators.TotalVotingPower()) + css[nVals].GetRoundState().LastVoters.TotalVotingPower()) } //--------------------------------------------------------------------------- logger.Info("---------------------------- Testing adding two validators at once") - newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err := css[nVals+1].privValidator.GetPubKey() + require.NoError(t, err) newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) - newValidatorPubKey3 := 
css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() + require.NoError(t, err) newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) diff --git a/consensus/replay.go b/consensus/replay.go index 1453849cc..9e0d090f8 100644 --- a/consensus/replay.go +++ b/consensus/replay.go @@ -288,6 +288,7 @@ func (h *Handshaker) ReplayBlocks( appBlockHeight int64, proxyApp proxy.AppConns, ) ([]byte, error) { + storeBlockBase := h.store.Base() storeBlockHeight := h.store.Height() stateBlockHeight := state.LastBlockHeight h.logger.Info( @@ -312,7 +313,7 @@ func (h *Handshaker) ReplayBlocks( Time: h.genDoc.GenesisTime, ChainId: h.genDoc.ChainID, ConsensusParams: csParams, - Validators: nextVals, + Validators: nextVals, // ValidatorOrVoter: validator AppStateBytes: h.genDoc.AppState, } res, err := proxyApp.Consensus().InitChainSync(req) @@ -328,7 +329,10 @@ func (h *Handshaker) ReplayBlocks( return nil, err } state.Validators = types.NewValidatorSet(vals) + state.Voters = types.ToVoterAll(state.Validators.Validators) + // Should sync it with MakeGenesisState() state.NextValidators = types.NewValidatorSet(vals) + state.NextVoters = types.SelectVoter(state.NextValidators, h.genDoc.Hash(), state.VoterParams) } else if len(h.genDoc.Validators) == 0 { // If validator set is not set in genesis and still empty after InitChain, exit. return nil, fmt.Errorf("validator set is nil in genesis and still empty after InitChain") @@ -341,12 +345,16 @@ func (h *Handshaker) ReplayBlocks( } } - // First handle edge cases and constraints on the storeBlockHeight. + // First handle edge cases and constraints on the storeBlockHeight and storeBlockBase. 
switch { case storeBlockHeight == 0: assertAppHashEqualsOneFromState(appHash, state) return appHash, nil + case appBlockHeight < storeBlockBase-1: + // the app is too far behind truncated store (can be 1 behind since we replay the next) + return appHash, sm.ErrAppBlockHeightTooLow{AppHeight: appBlockHeight, StoreBase: storeBlockBase} + case storeBlockHeight < appBlockHeight: // the app should never be ahead of the store (but this is under app's control) return appHash, sm.ErrAppBlockHeightTooHigh{CoreHeight: storeBlockHeight, AppHeight: appBlockHeight} @@ -442,7 +450,7 @@ func (h *Handshaker) replayBlocks( assertAppHashEqualsOneFromBlock(appHash, block) } - appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB) + appHash, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, h.logger, h.stateDB, state.VoterParams) if err != nil { return nil, err } @@ -472,7 +480,7 @@ func (h *Handshaker) replayBlock(state sm.State, height int64, proxyApp proxy.Ap blockExec.SetEventBus(h.eventBus) var err error - state, err = blockExec.ApplyBlock(state, meta.BlockID, block) + state, _, err = blockExec.ApplyBlock(state, meta.BlockID, block) if err != nil { return sm.State{}, err } diff --git a/consensus/replay_file.go b/consensus/replay_file.go index b8b8c51da..b7d32d2fa 100644 --- a/consensus/replay_file.go +++ b/consensus/replay_file.go @@ -248,8 +248,8 @@ func (pb *playback) replayConsoleLoop() int { switch tokens[1] { case "short": fmt.Printf("%v/%v/%v\n", rs.Height, rs.Round, rs.Step) - case "validators": - fmt.Println(rs.Validators) + case "voters": + fmt.Println(rs.Voters) case "proposal": fmt.Println(rs.Proposal) case "proposal_block": @@ -314,8 +314,7 @@ func newConsensusStateForReplay(config cfg.BaseConfig, csConfig *cfg.ConsensusCo mempool, evpool := mock.Mempool{}, sm.MockEvidencePool{} blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) - consensusState := NewState(csConfig, state.Copy(), 
blockExec, - blockStore, mempool, evpool) + consensusState := NewState(csConfig, state.Copy(), blockExec, blockStore, mempool, evpool) consensusState.SetEventBus(eventBus) return consensusState diff --git a/consensus/replay_test.go b/consensus/replay_test.go index 6753731e0..78e73d5cf 100644 --- a/consensus/replay_test.go +++ b/consensus/replay_test.go @@ -17,6 +17,8 @@ import ( "sort" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" cfg "github.com/tendermint/tendermint/config" @@ -31,7 +33,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) func TestMain(m *testing.M) { @@ -307,21 +308,25 @@ var ( // 0 - all synced up // 1 - saved block but app and state are behind // 2 - save block and committed but state is behind -var modes = []uint{0, 1, 2} +// 3 - save block and committed with truncated block store and state behind +var modes = []uint{0, 1, 2, 3} func getProposerIdx(state *State, height int64, round int) (int, *types.Validator) { - proposer := types.SelectProposer(state.Validators, state.state.LastProofHash, height, round) - return state.Validators.GetByAddress(proposer.PubKey.Address()) + proposer := state.Validators.SelectProposer(state.state.LastProofHash, height, round) + return state.Voters.GetByAddress(proposer.PubKey.Address()) } -func createProposalBlock(cs *State, proposerState *State, round int) (*types.Block, *types.PartSet) { +func createProposalBlock(t *testing.T, cs *State, proposerState *State, round int) (*types.Block, *types.PartSet) { var commit *types.Commit if cs.Height == 1 { commit = types.NewCommit(0, 0, types.BlockID{}, nil) } else { commit = cs.LastCommit.MakeCommit() } - proposerAddr := proposerState.privValidator.GetPubKey().Address() + //proposerAddr := 
proposerState.privValidator.GetPubKey().Address() + proposerPubKey, err := proposerState.privValidator.GetPubKey() + require.NoError(t, err) + proposerAddr := proposerPubKey.Address() message := cs.state.MakeHashMessage(round) proof, err := proposerState.privValidator.GenerateVRFProof(message) if err != nil { @@ -340,23 +345,26 @@ func consensusNewBlock(t *testing.T, height int64, vss []*validatorStub, css []* proposerIdx, prop := getProposerIdx(css[0], height, 0) // search idx of proposer in the css - proposerIdxOfCss := 0 + proposerIdxOfCSS := 0 for i, cs := range css { - if prop.PubKey.Equals(cs.privValidator.GetPubKey()) { - proposerIdxOfCss = i + + csPubKey, err := cs.privValidator.GetPubKey() + require.NoError(t, err) + if prop.PubKey.Equals(csPubKey) { + proposerIdxOfCSS = i break } } // state0 is main started machine (css[0]) - if proposerIdxOfCss == 0 { + if proposerIdxOfCSS == 0 { ensureNewProposal(proposalCH, height, 0) rs := css[0].GetRoundState() for _, voterIdx := range voterList { signAddVotes(css[0], types.PrecommitType, rs.ProposalBlock.Hash(), rs.ProposalBlockParts.Header(), vss[voterIdx]) } } else { - propBlock, _ := createProposalBlock(css[0], css[proposerIdxOfCss], 0) + propBlock, _ := createProposalBlock(t, css[0], css[proposerIdxOfCSS], 0) propBlockParts := propBlock.MakePartSet(types.BlockPartSizeBytes) blockID := types.BlockID{Hash: propBlock.Hash(), PartsHeader: propBlockParts.Header()} proposal := types.NewProposal(vss[proposerIdx].Height, 0, -1, blockID) @@ -413,10 +421,11 @@ func TestSimulateValidatorsChange(t *testing.T) { height++ incrementHeight(vss...) 
consensusNewBlock(t, height, vss, css, newRoundCh, proposalCh, []int{1, 2, 3}, func() { - newValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + newValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) valPubKey1ABCI := types.TM2PB.PubKey(newValidatorPubKey1) newValidatorTx1 := kvstore.MakeValSetChangeTx(valPubKey1ABCI, testMinPower) - err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx1, nil, mempl.TxInfo{}) assert.Nil(t, err) }) @@ -424,10 +433,11 @@ func TestSimulateValidatorsChange(t *testing.T) { height++ incrementHeight(vss...) consensusNewBlock(t, height, vss, css, newRoundCh, proposalCh, []int{1, 2, 3}, func() { - updateValidatorPubKey1 := css[nVals].privValidator.GetPubKey() + updateValidatorPubKey1, err := css[nVals].privValidator.GetPubKey() + require.NoError(t, err) updatePubKey1ABCI := types.TM2PB.PubKey(updateValidatorPubKey1) updateValidatorTx1 := kvstore.MakeValSetChangeTx(updatePubKey1ABCI, 25) - err := assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(updateValidatorTx1, nil, mempl.TxInfo{}) assert.Nil(t, err) }) @@ -442,19 +452,27 @@ func TestSimulateValidatorsChange(t *testing.T) { // re-calculate voterList idx := 0 for i, vs := range newVss { - if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + vsPubKey, err0 := vs.GetPubKey() + require.NoError(t, err0) + + css0PubKey, err1 := css[0].privValidator.GetPubKey() + require.NoError(t, err1) + + if vsPubKey.Equals(css0PubKey) { continue } voterList[idx] = i idx++ } consensusNewBlock(t, height, newVss, css, newRoundCh, proposalCh, voterList, func() { - newValidatorPubKey2 := css[nVals+1].privValidator.GetPubKey() + newValidatorPubKey2, err1 := css[nVals+1].privValidator.GetPubKey() + require.NoError(t, err1) newVal2ABCI := types.TM2PB.PubKey(newValidatorPubKey2) 
newValidatorTx2 := kvstore.MakeValSetChangeTx(newVal2ABCI, testMinPower) err := assertMempool(css[0].txNotifier).CheckTx(newValidatorTx2, nil, mempl.TxInfo{}) assert.Nil(t, err) - newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err2 := css[nVals+2].privValidator.GetPubKey() + require.NoError(t, err2) newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) newValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, testMinPower) err = assertMempool(css[0].txNotifier).CheckTx(newValidatorTx3, nil, mempl.TxInfo{}) @@ -477,17 +495,24 @@ func TestSimulateValidatorsChange(t *testing.T) { voterList = make([]int, nVals+2) idx = 0 for i, vs := range newVss { - if vs.GetPubKey().Equals(css[0].privValidator.GetPubKey()) { + vsPubKey, err1 := vs.GetPubKey() + require.NoError(t, err1) + + css0PubKey, err2 := css[0].privValidator.GetPubKey() + require.NoError(t, err2) + + if vsPubKey.Equals(css0PubKey) { continue } voterList[idx] = i idx++ } consensusNewBlock(t, height, newVss, css, newRoundCh, proposalCh, voterList, func() { - newValidatorPubKey3 := css[nVals+2].privValidator.GetPubKey() + newValidatorPubKey3, err := css[nVals+2].privValidator.GetPubKey() + require.NoError(t, err) newVal3ABCI := types.TM2PB.PubKey(newValidatorPubKey3) removeValidatorTx3 := kvstore.MakeValSetChangeTx(newVal3ABCI, 0) - err := assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{}) + err = assertMempool(css[0].txNotifier).CheckTx(removeValidatorTx3, nil, mempl.TxInfo{}) assert.Nil(t, err) }) @@ -512,10 +537,10 @@ func TestHandshakeReplayAll(t *testing.T) { // Sync many, not from scratch func TestHandshakeReplaySome(t *testing.T) { for _, m := range modes { - testHandshakeReplay(t, config, 1, m, false) + testHandshakeReplay(t, config, 2, m, false) } for _, m := range modes { - testHandshakeReplay(t, config, 1, m, true) + testHandshakeReplay(t, config, 2, m, true) } } @@ -618,7 +643,7 @@ func testHandshakeReplay(t *testing.T, config 
*cfg.Config, nBlocks int, mode uin stateDB = dbm.NewMemDB() genisisState = sim.GenesisState config = sim.Config - chain = sim.Chain + chain = append([]*types.Block{}, sim.Chain...) // copy chain commits = sim.Commits store = newMockBlockStore(config, genisisState.ConsensusParams) } else { //test single node @@ -640,7 +665,9 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin chain, commits, err = makeBlockchainFromWAL(wal) require.NoError(t, err) - stateDB, genisisState, store = stateAndStore(config, privVal.GetPubKey(), kvstore.ProtocolVersion) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, genisisState, store = stateAndStore(config, pubKey, kvstore.ProtocolVersion) } store.chain = chain store.commits = commits @@ -664,6 +691,15 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin buildAppStateFromChain(proxyApp, stateDB1, genisisState, chain, nBlocks, mode) } + // Prune block store if requested + expectError := false + if mode == 3 { + pruned, err := store.PruneBlocks(2) + require.NoError(t, err) + require.EqualValues(t, 1, pruned) + expectError = int64(nBlocks) < 2 + } + // now start the app using the handshake - it should sync genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) handshaker := NewHandshaker(stateDB, state, store, genDoc) @@ -672,7 +708,11 @@ func testHandshakeReplay(t *testing.T, config *cfg.Config, nBlocks int, mode uin t.Fatalf("Error starting proxy app connections: %v", err) } defer proxyApp.Stop() - if err := handshaker.Handshake(proxyApp); err != nil { + err := handshaker.Handshake(proxyApp) + if expectError { + require.Error(t, err) + return + } else if err != nil { t.Fatalf("Error on abci handshake: %v", err) } @@ -707,7 +747,7 @@ func applyBlock(stateDB dbm.DB, st sm.State, blk *types.Block, proxyApp proxy.Ap blockExec := sm.NewBlockExecutor(stateDB, log.TestingLogger(), proxyApp.Consensus(), mempool, evpool) blkID := types.BlockID{Hash: 
blk.Hash(), PartsHeader: blk.MakePartSet(testPartSize).Header()} - newState, err := blockExec.ApplyBlock(st, blkID, blk) + newState, _, err := blockExec.ApplyBlock(st, blkID, blk) if err != nil { panic(err) } @@ -737,17 +777,19 @@ func buildAppStateFromChain(proxyApp proxy.AppConns, stateDB dbm.DB, block := chain[i] state = applyBlock(stateDB, state, block, proxyApp) } - case 1, 2: + case 1, 2, 3: for i := 0; i < nBlocks-1; i++ { block := chain[i] state = applyBlock(stateDB, state, block, proxyApp) } - if mode == 2 { + if mode == 2 || mode == 3 { // update the kvstore height and apphash // as if we ran commit but not state = applyBlock(stateDB, state, chain[nBlocks-1], proxyApp) } + default: + panic(fmt.Sprintf("unknown mode %v", mode)) } } @@ -785,7 +827,7 @@ func buildTMStateFromChain( state = applyBlock(stateDB, state, block, proxyApp) } - case 1, 2: + case 1, 2, 3: // sync up to the penultimate as if we stored the block. // whether we commit or not depends on the appHash for _, block := range chain[:len(chain)-1] { @@ -795,6 +837,8 @@ func buildTMStateFromChain( // apply the final block to a state copy so we can // get the right next appHash but keep the state back applyBlock(stateDB, state, chain[len(chain)-1], proxyApp) + default: + panic(fmt.Sprintf("unknown mode %v", mode)) } return state @@ -809,9 +853,11 @@ func TestHandshakePanicsIfAppReturnsWrongAppHash(t *testing.T) { defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) const appVersion = 0x0 - stateDB, state, store := stateAndStore(config, privVal.GetPubKey(), appVersion) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, state, store := stateAndStore(config, pubKey, appVersion) genDoc, _ := sm.MakeGenesisDocFromFile(config.GenesisFile()) - state.LastValidators = state.Validators.Copy() + state.LastVoters = state.Voters.Copy() // mode = 0 for committing all the blocks blocks := makeBlocks(3, &state, 
privVal) store.chain = blocks @@ -899,7 +945,7 @@ func makeBlock(state sm.State, lastBlock *types.Block, lastBlockMeta *types.Bloc message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) return state.MakeBlock(height, []types.Tx{}, lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, proof) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, proof) } type badApp struct { @@ -1055,14 +1101,17 @@ type mockBlockStore struct { params types.ConsensusParams chain []*types.Block commits []*types.Commit + base int64 } // TODO: NewBlockStore(db.NewMemDB) ... func newMockBlockStore(config *cfg.Config, params types.ConsensusParams) *mockBlockStore { - return &mockBlockStore{config, params, nil, nil} + return &mockBlockStore{config, params, nil, nil, 0} } func (bs *mockBlockStore) Height() int64 { return int64(len(bs.chain)) } +func (bs *mockBlockStore) Base() int64 { return bs.base } +func (bs *mockBlockStore) Size() int64 { return bs.Height() - bs.Base() + 1 } func (bs *mockBlockStore) LoadBlock(height int64) *types.Block { return bs.chain[height-1] } func (bs *mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return bs.chain[int64(len(bs.chain))-1] @@ -1084,6 +1133,17 @@ func (bs *mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return bs.commits[height-1] } +func (bs *mockBlockStore) PruneBlocks(height int64) (uint64, error) { + pruned := uint64(0) + for i := int64(0); i < height-1; i++ { + bs.chain[i] = nil + bs.commits[i] = nil + pruned++ + } + bs.base = height + return pruned, nil +} + //--------------------------------------- // Test handshake/init chain @@ -1096,7 +1156,9 @@ func TestHandshakeUpdatesValidators(t *testing.T) { config := ResetConfig("handshake_test_") defer os.RemoveAll(config.RootDir) privVal := privval.LoadFilePV(config.PrivValidatorKeyFile(), config.PrivValidatorStateFile()) - stateDB, state, store := stateAndStore(config, 
privVal.GetPubKey(), 0x0) + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) + stateDB, state, store := stateAndStore(config, pubKey, 0x0) oldValAddr := state.Validators.Validators[0].Address diff --git a/consensus/state.go b/consensus/state.go index b1969efdd..02d09db01 100644 --- a/consensus/state.go +++ b/consensus/state.go @@ -9,6 +9,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/fail" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" @@ -204,7 +205,7 @@ func StateMetrics(metrics *Metrics) StateOption { // String returns a string. func (cs *State) String() string { // better not to access shared variables - return fmt.Sprintf("ConsensusState") //(H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) + return "ConsensusState" // (H:%v R:%v S:%v", cs.Height, cs.Round, cs.Step) } // GetState returns a copy of the chain state. @@ -245,6 +246,7 @@ func (cs *State) GetRoundStateSimpleJSON() ([]byte, error) { } // GetValidators returns a copy of the current validators. +// ValidatorOrVoter: validator func (cs *State) GetValidators() (int64, []*types.Validator) { cs.mtx.RLock() defer cs.mtx.RUnlock() @@ -489,7 +491,11 @@ func (cs *State) reconstructLastCommit(state sm.State) { return } seenCommit := cs.blockStore.LoadSeenCommit(state.LastBlockHeight) - lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastValidators) + if seenCommit == nil { + panic(fmt.Sprintf("Failed to reconstruct LastCommit: seen commit for height %v not found", + state.LastBlockHeight)) + } + lastPrecommits := types.CommitToVoteSet(state.ChainID, seenCommit, state.LastVoters) if !lastPrecommits.HasTwoThirdsMajority() { panic("Failed to reconstruct LastCommit: Does not have +2/3 maj") } @@ -527,7 +533,7 @@ func (cs *State) updateToState(state sm.State) { } // Reset fields based on state. 
- validators := state.Validators + voters := state.Voters lastPrecommits := (*types.VoteSet)(nil) if cs.CommitRound > -1 && cs.Votes != nil { if !cs.Votes.Precommits(cs.CommitRound).HasTwoThirdsMajority() { @@ -553,7 +559,8 @@ func (cs *State) updateToState(state sm.State) { cs.StartTime = cs.config.Commit(cs.CommitTime) } - cs.Validators = validators + cs.Validators = state.Validators.Copy() + cs.Voters = state.Voters.Copy() cs.Proposal = nil cs.ProposalBlock = nil cs.ProposalBlockParts = nil @@ -563,10 +570,10 @@ func (cs *State) updateToState(state sm.State) { cs.ValidRound = -1 cs.ValidBlock = nil cs.ValidBlockParts = nil - cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, validators) + cs.Votes = cstypes.NewHeightVoteSet(state.ChainID, height, voters) cs.CommitRound = -1 cs.LastCommit = lastPrecommits - cs.LastValidators = state.LastValidators + cs.LastVoters = state.LastVoters cs.TriggeredTimeoutPrecommit = false cs.state = state @@ -828,7 +835,7 @@ func (cs *State) enterNewRound(height int64, round int) { logger.Info(fmt.Sprintf("enterNewRound(%v/%v). Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) // Select the current height and round Proposer - cs.Proposer = types.SelectProposer(cs.Validators, cs.state.LastProofHash, height, round) + cs.Proposer = cs.Validators.SelectProposer(cs.state.LastProofHash, height, round) // Setup new round // we don't fire newStep for this step, @@ -872,6 +879,9 @@ func (cs *State) needProofBlock(height int64) bool { } lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) + if lastBlockMeta == nil { + panic(fmt.Sprintf("needProofBlock: last block meta for height %d not found", height-1)) + } return !bytes.Equal(cs.state.AppHash, lastBlockMeta.Header.AppHash) } @@ -893,6 +903,7 @@ func (cs *State) enterPropose(height int64, round int) { return } logger.Info(fmt.Sprintf("enterPropose(%v/%v). 
Current: %v/%v/%v", height, round, cs.Height, cs.Round, cs.Step)) + defer func() { // Done enterPropose: cs.updateRoundStep(round, cstypes.RoundStepPropose) @@ -914,15 +925,18 @@ func (cs *State) enterPropose(height int64, round int) { logger.Debug("This node is not a validator") return } + logger.Debug("This node is a validator") - // if not a validator, we're done - address := cs.privValidator.GetPubKey().Address() - if !cs.Validators.HasAddress(address) { - logger.Debug("This node is not a validator", "addr", address, "vals", cs.Validators) + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // If this node is a validator & proposer in the current round, it will + // miss the opportunity to create a block. + logger.Error("Error on retrival of pubkey", "err", err) return } - logger.Debug("This node is a validator") + address := pubKey.Address() + // I'm a proposer, but I might not be a voter if cs.isProposer(address) { logger.Info("enterPropose: Our turn to propose", "proposer", @@ -936,6 +950,13 @@ func (cs *State) enterPropose(height int64, round int) { cs.Proposer.Address, "privValidator", cs.privValidator) + + } + + if !cs.Voters.HasAddress(address) { + logger.Debug("This node is not elected as a voter") + } else { + logger.Debug("This node is elected as a voter") } } @@ -999,11 +1020,13 @@ func (cs *State) isProposalComplete() bool { } -// Create the next block to propose and return it. -// We really only need to return the parts, but the block -// is returned for convenience so we can log the proposal block. -// Returns nil block upon error. +// Create the next block to propose and return it. Returns nil block upon error. +// +// We really only need to return the parts, but the block is returned for +// convenience so we can log the proposal block. +// // NOTE: keep it side-effect free for clarity. +// CONTRACT: cs.privValidator is not nil. 
func (cs *State) createProposalBlock(round int) (block *types.Block, blockParts *types.PartSet) { var commit *types.Commit switch { @@ -1014,13 +1037,22 @@ func (cs *State) createProposalBlock(round int) (block *types.Block, blockParts case cs.LastCommit.HasTwoThirdsMajority(): // Make the commit from LastCommit commit = cs.LastCommit.MakeCommit() - default: - // This shouldn't happen. - cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block.") + default: // This shouldn't happen. + cs.Logger.Error("enterPropose: Cannot propose anything: No commit for the previous block") return } - proposerAddr := cs.privValidator.GetPubKey().Address() + if cs.privValidator == nil { + panic("entered createProposalBlock with privValidator being nil") + } + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // If this node is a validator & proposer in the current round, it will + // miss the opportunity to create a block. + cs.Logger.Error("Error on retrival of pubkey", "err", err) + return + } + proposerAddr := pubKey.Address() message := cs.state.MakeHashMessage(round) proof, err := cs.privValidator.GenerateVRFProof(message) @@ -1368,13 +1400,13 @@ func (cs *State) finalizeCommit(height int64) { block, blockParts := cs.ProposalBlock, cs.ProposalBlockParts if !ok { - panic(fmt.Sprintf("Cannot finalizeCommit, commit does not have two thirds majority")) + panic("Cannot finalizeCommit, commit does not have two thirds majority") } if !blockParts.HasHeader(blockID.PartsHeader) { - panic(fmt.Sprintf("Expected ProposalBlockParts header to be commit header")) + panic("Expected ProposalBlockParts header to be commit header") } if !block.HashesTo(blockID.Hash) { - panic(fmt.Sprintf("Cannot finalizeCommit, ProposalBlock does not hash to commit hash")) + panic("Cannot finalizeCommit, ProposalBlock does not hash to commit hash") } if err := cs.blockExec.ValidateBlock(cs.state, cs.CommitRound, block); err != nil { panic(fmt.Sprintf("+2/3 committed 
an invalid block: %v", err)) @@ -1430,10 +1462,9 @@ func (cs *State) finalizeCommit(height int64) { // Execute and commit the block, update and save the state, and update the mempool. // NOTE The block.AppHash wont reflect these txs until the next block. var err error - stateCopy, err = cs.blockExec.ApplyBlock( - stateCopy, - types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, - block) + var retainHeight int64 + stateCopy, retainHeight, err = cs.blockExec.ApplyBlock( + stateCopy, types.BlockID{Hash: block.Hash(), PartsHeader: blockParts.Header()}, block) if err != nil { cs.Logger.Error("Error on ApplyBlock. Did the application crash? Please restart tendermint", "err", err) err := tmos.Kill() @@ -1445,11 +1476,22 @@ func (cs *State) finalizeCommit(height int64) { fail.Fail() // XXX + // Prune old heights, if requested by ABCI app. + if retainHeight > 0 { + pruned, err := cs.pruneBlocks(retainHeight) + if err != nil { + cs.Logger.Error("Failed to prune blocks", "retainHeight", retainHeight, "err", err) + } else { + cs.Logger.Info("Pruned blocks", "pruned", pruned, "retainHeight", retainHeight) + } + } + // must be called before we update state cs.recordMetrics(height, block) // NewHeightStep! cs.updateToState(stateCopy) + fail.Fail() // XXX // cs.StartTime is already set. @@ -1462,15 +1504,31 @@ func (cs *State) finalizeCommit(height int64) { // * cs.StartTime is set to when we will start round0. 
} +func (cs *State) pruneBlocks(retainHeight int64) (uint64, error) { + base := cs.blockStore.Base() + if retainHeight <= base { + return 0, nil + } + pruned, err := cs.blockStore.PruneBlocks(retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune block store: %w", err) + } + err = sm.PruneStates(cs.blockExec.DB(), base, retainHeight) + if err != nil { + return 0, fmt.Errorf("failed to prune state database: %w", err) + } + return pruned, nil +} + func (cs *State) recordMetrics(height int64, block *types.Block) { - cs.metrics.Validators.Set(float64(cs.Validators.Size())) - cs.metrics.ValidatorsPower.Set(float64(cs.Validators.TotalVotingPower())) + cs.metrics.Voters.Set(float64(cs.Voters.Size())) + cs.metrics.VotersPower.Set(float64(cs.Voters.TotalVotingPower())) var ( - missingValidators int - missingValidatorsPower int64 + missingVoters int + missingVotersPower int64 ) - // height=0 -> MissingValidators and MissingValidatorsPower are both 0. + // height=0 -> MissingVoters and MissingVotersPower are both 0. // Remember that the first LastCommit is intentionally empty, so it's not // fair to increment missing validators number. if height > 1 { @@ -1478,50 +1536,60 @@ func (cs *State) recordMetrics(height int64, block *types.Block) { // after first block. 
var ( commitSize = block.LastCommit.Size() - valSetLen = len(cs.LastValidators.Validators) + valSetLen = len(cs.LastVoters.Voters) ) if commitSize != valSetLen { panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastValidators.Validators)) + commitSize, valSetLen, block.Height, block.LastCommit.Signatures, cs.LastVoters.Voters)) } - for i, val := range cs.LastValidators.Validators { + for i, val := range cs.LastVoters.Voters { commitSig := block.LastCommit.Signatures[i] if commitSig.Absent() { - missingValidators++ - missingValidatorsPower += val.VotingPower + missingVoters++ + missingVotersPower += val.VotingPower } - if cs.privValidator != nil && bytes.Equal(val.Address, cs.privValidator.GetPubKey().Address()) { - label := []string{ - "validator_address", val.Address.String(), + if cs.privValidator != nil { + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // Metrics won't be updated, but it's not critical. 
+ cs.Logger.Error("Error on retrival of pubkey", "err", err) + continue } - cs.metrics.ValidatorPower.With(label...).Set(float64(val.VotingPower)) - if commitSig.ForBlock() { - cs.metrics.ValidatorLastSignedHeight.With(label...).Set(float64(height)) - } else { - cs.metrics.ValidatorMissedBlocks.With(label...).Add(float64(1)) + if bytes.Equal(val.Address, pubKey.Address()) { + label := []string{ + "validator_address", val.Address.String(), + } + cs.metrics.VoterPower.With(label...).Set(float64(val.VotingPower)) + if commitSig.ForBlock() { + cs.metrics.VoterLastSignedHeight.With(label...).Set(float64(height)) + } else { + cs.metrics.VoterMissedBlocks.With(label...).Add(float64(1)) + } } } } } - cs.metrics.MissingValidators.Set(float64(missingValidators)) - cs.metrics.MissingValidatorsPower.Set(float64(missingValidatorsPower)) + cs.metrics.MissingVoters.Set(float64(missingVoters)) + cs.metrics.MissingVotersPower.Set(float64(missingVotersPower)) - cs.metrics.ByzantineValidators.Set(float64(len(block.Evidence.Evidence))) - byzantineValidatorsPower := int64(0) + cs.metrics.ByzantineVoters.Set(float64(len(block.Evidence.Evidence))) + byzantineVotersPower := int64(0) for _, ev := range block.Evidence.Evidence { - if _, val := cs.Validators.GetByAddress(ev.Address()); val != nil { - byzantineValidatorsPower += val.VotingPower + if _, val := cs.Voters.GetByAddress(ev.Address()); val != nil { + byzantineVotersPower += val.VotingPower } } - cs.metrics.ByzantineValidatorsPower.Set(float64(byzantineValidatorsPower)) + cs.metrics.ByzantineVotersPower.Set(float64(byzantineVotersPower)) if height > 1 { lastBlockMeta := cs.blockStore.LoadBlockMeta(height - 1) - cs.metrics.BlockIntervalSeconds.Set( - block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), - ) + if lastBlockMeta != nil { + cs.metrics.BlockIntervalSeconds.Set( + block.Time.Sub(lastBlockMeta.Header.Time).Seconds(), + ) + } } cs.metrics.NumTxs.Set(float64(len(block.Data.Txs))) @@ -1551,7 +1619,7 @@ func (cs *State) 
defaultSetProposal(proposal *types.Proposal) error { } // If consensus does not enterNewRound yet, cs.Proposer may be nil or prior proposer, so don't use cs.Proposer - proposer := types.SelectProposer(cs.Validators, cs.state.LastProofHash, proposal.Height, proposal.Round) + proposer := cs.Validators.SelectProposer(cs.state.LastProofHash, proposal.Height, proposal.Round) // Verify signature if !proposer.PubKey.VerifyBytes(proposal.SignBytes(cs.state.ChainID), proposal.Signature) { @@ -1652,8 +1720,12 @@ func (cs *State) tryAddVote(vote *types.Vote, peerID p2p.ID) (bool, error) { if err == ErrVoteHeightMismatch { return added, err } else if voteErr, ok := err.(*types.ErrVoteConflictingVotes); ok { - addr := cs.privValidator.GetPubKey().Address() - if bytes.Equal(vote.ValidatorAddress, addr) { + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + return false, errors.Wrap(err, "can't get pubkey") + } + + if bytes.Equal(vote.ValidatorAddress, pubKey.Address()) { cs.Logger.Error( "Found conflicting vote from ourselves. Did you unsafe_reset a validator?", "height", @@ -1842,6 +1914,7 @@ func (cs *State) addVote( return added, err } +// CONTRACT: cs.privValidator is not nil. func (cs *State) signVote( msgType types.SignedMsgType, hash []byte, @@ -1851,19 +1924,24 @@ func (cs *State) signVote( // and the privValidator will refuse to sign anything. 
cs.wal.FlushAndSync() - addr := cs.privValidator.GetPubKey().Address() - valIndex, _ := cs.Validators.GetByAddress(addr) + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + addr := pubKey.Address() + valIdx, _ := cs.Voters.GetByAddress(addr) vote := &types.Vote{ ValidatorAddress: addr, - ValidatorIndex: valIndex, + ValidatorIndex: valIdx, Height: cs.Height, Round: cs.Round, Timestamp: cs.voteTime(), Type: msgType, BlockID: types.BlockID{Hash: hash, PartsHeader: header}, } - err := cs.privValidator.SignVote(cs.state.ChainID, vote) + + err = cs.privValidator.SignVote(cs.state.ChainID, vote) return vote, err } @@ -1888,10 +1966,23 @@ func (cs *State) voteTime() time.Time { // sign the vote and publish on internalMsgQueue func (cs *State) signAddVote(msgType types.SignedMsgType, hash []byte, header types.PartSetHeader) *types.Vote { - // if we don't have a key or we're not in the validator set, do nothing - if cs.privValidator == nil || !cs.Validators.HasAddress(cs.privValidator.GetPubKey().Address()) { + if cs.privValidator == nil { // the node does not have a key return nil } + + pubKey, err := cs.privValidator.GetPubKey() + if err != nil { + // Vote won't be signed, but it's not critical. + cs.Logger.Error("Error on retrival of pubkey", "err", err) + return nil + } + + // If the node not in the validator set, do nothing. 
+ if !cs.Voters.HasAddress(pubKey.Address()) { + return nil + } + + // TODO: pass pubKey to signVote vote, err := cs.signVote(msgType, hash, header) if err == nil { cs.sendInternalMessage(msgInfo{&VoteMessage{vote}, ""}) diff --git a/consensus/state_test.go b/consensus/state_test.go index af91e80aa..58ebba323 100644 --- a/consensus/state_test.go +++ b/consensus/state_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" cstypes "github.com/tendermint/tendermint/consensus/types" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/libs/log" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -65,7 +66,9 @@ func TestStateProposerSelection0(t *testing.T) { // Commit a block and ensure proposer for the next height is correct. prop := cs1.GetRoundState().Proposer - address := cs1.privValidator.GetPubKey().Address() + pv, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + address := pv.Address() if !bytes.Equal(prop.Address, address) { t.Fatalf("expected proposer to be validator %d. Got %X", 0, prop.Address) } @@ -80,7 +83,7 @@ func TestStateProposerSelection0(t *testing.T) { ensureNewRound(newRoundCh, height+1, 0) prop = cs1.GetRoundState().Proposer - addr := types.SelectProposer(cs1.Validators, cs1.state.LastProofHash, cs1.Height, cs1.Round).PubKey.Address() + addr := cs1.Validators.SelectProposer(cs1.state.LastProofHash, cs1.Height, cs1.Round).PubKey.Address() if !bytes.Equal(prop.Address, addr) { panic(fmt.Sprintf("expected proposer to be validator %d. Got %X", 1, prop.Address)) } @@ -104,10 +107,10 @@ func TestStateProposerSelection2(t *testing.T) { // everyone just votes nil. 
we get a new proposer each round for i := 0; i < len(vss); i++ { prop := cs1.GetRoundState().Proposer - addr := types.SelectProposer(cs1.Validators, cs1.state.LastProofHash, height, i+round).PubKey.Address() + addr := cs1.Validators.SelectProposer(cs1.state.LastProofHash, height, i+round).PubKey.Address() correctProposer := addr if !bytes.Equal(prop.Address, correctProposer) { - idx, _ := cs1.Validators.GetByAddress(addr) + idx, _ := cs1.Voters.GetByAddress(addr) panic(fmt.Sprintf( "expected RoundState.Validators.GetProposer() to be validator %d. Got %X", idx, @@ -182,7 +185,7 @@ func TestStateBadProposal(t *testing.T) { proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) voteCh := subscribe(cs1.eventBus, types.EventQueryVote) - propBlock, _ := cs1.createProposalBlock(round) //changeProposer(t, cs1, vs2) + propBlock, _ := cs1.createProposalBlock(round) // changeProposer(t, cs1, vs2) // make the second validator the proposer by incrementing round round++ @@ -520,7 +523,9 @@ func TestStateLockPOLRelock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) @@ -614,7 +619,9 @@ func TestStateLockPOLUnlock(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // everything done from perspective of cs1 @@ 
-708,7 +715,9 @@ func TestStateLockPOLSafety1(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1, 2}, []int64{height, height, height}, []int{round, round + 1, round + 2}) @@ -827,7 +836,9 @@ func TestStateLockPOLSafety2(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1}, []int64{height, height}, []int{round, round + 1}) @@ -920,7 +931,9 @@ func TestProposeValidBlock(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, theOthers(0), theOthers(0), theOthers(0), 0}, @@ -1014,7 +1027,9 @@ func TestValidateValidBlockOnCommit(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) unlockCh := subscribe(cs1.eventBus, types.EventQueryUnlock) - addr := cs1.privValidator.GetPubKey().Address() + pubKey, err := cs1.privValidator.GetPubKey() + 
require.NoError(t, err) + addr := pubKey.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, theOthers(0), theOthers(0), theOthers(0), 0}, @@ -1151,7 +1166,9 @@ func TestSetValidBlockOnDelayedPrevote(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1}, []int64{height, height}, []int{round, round + 1}) @@ -1213,7 +1230,9 @@ func TestSetValidBlockOnDelayedProposal(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) validBlockCh := subscribe(cs1.eventBus, types.EventQueryValidBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) proposalCh := subscribe(cs1.eventBus, types.EventQueryCompleteProposal) @@ -1286,7 +1305,9 @@ func TestWaitingTimeoutProposeOnNewRound(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round @@ -1320,7 +1341,9 @@ func TestRoundSkipOnNilPolkaFromHigherRound(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, 
err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round @@ -1354,7 +1377,9 @@ func TestWaitTimeoutProposeOnNilPolkaForTheCurrentRound(t *testing.T) { timeoutProposeCh := subscribe(cs1.eventBus, types.EventQueryTimeoutPropose) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) // start round in which PO is not proposer @@ -1471,7 +1496,9 @@ func TestStartNextHeightCorrectly(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1}, []int64{height, height + 1}, []int{round, 0}) @@ -1528,7 +1555,9 @@ func TestResetTimeoutPrecommitUponNewHeight(t *testing.T) { newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockHeader := subscribe(cs1.eventBus, types.EventQueryNewBlockHeader) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1}, []int64{height, height + 1}, []int{round, 0}) @@ -1667,7 +1696,9 @@ func TestStateHalt1(t *testing.T) { timeoutWaitCh := subscribe(cs1.eventBus, types.EventQueryTimeoutWait) newRoundCh := subscribe(cs1.eventBus, types.EventQueryNewRound) newBlockCh := subscribe(cs1.eventBus, types.EventQueryNewBlock) - addr := cs1.privValidator.GetPubKey().Address() + pv1, err := cs1.privValidator.GetPubKey() + require.NoError(t, err) + addr := pv1.Address() voteCh := subscribeToVoter(cs1, addr) forceProposer(cs1, vss, []int{0, 1}, []int64{height, 
height}, []int{round, round + 1}) @@ -1815,3 +1846,154 @@ func subscribeUnBuffered(eventBus *types.EventBus, q tmpubsub.Query) <-chan tmpu } return sub.Out() } + +func makeVssMap(vss []*validatorStub) map[crypto.PubKey]*validatorStub { + vssMap := make(map[crypto.PubKey]*validatorStub) + for _, pv := range vss { + pubKey, _ := pv.GetPubKey() + vssMap[pubKey] = pv + } + return vssMap +} + +func votersPrivVals(voterSet *types.VoterSet, vssMap map[crypto.PubKey]*validatorStub) []*validatorStub { + totalVotingPower := voterSet.TotalVotingPower() + votingPower := int64(0) + voters := 0 + for i, v := range voterSet.Voters { + vssMap[v.PubKey].Index = i // NOTE: re-indexing for new voters + if votingPower < totalVotingPower*2/3+1 { + votingPower += v.VotingPower + voters++ + } + } + result := make([]*validatorStub, voters) + for i := 0; i < voters; i++ { + result[i] = vssMap[voterSet.Voters[i].PubKey] + } + return result +} + +func createProposalBlockByOther(cs *State, other *validatorStub, round int) ( + block *types.Block, blockParts *types.PartSet) { + var commit *types.Commit + switch { + case cs.Height == 1: + commit = types.NewCommit(0, 0, types.BlockID{}, nil) + case cs.LastCommit.HasTwoThirdsMajority(): + commit = cs.LastCommit.MakeCommit() + default: + return + } + + pubKey, err := other.GetPubKey() + if err != nil { + return + } + proposerAddr := pubKey.Address() + message := cs.state.MakeHashMessage(round) + + proof, err := other.GenerateVRFProof(message) + if err != nil { + return + } + return cs.blockExec.CreateProposalBlock(cs.Height, cs.state, commit, proposerAddr, round, proof) +} + +func proposeBlock(t *testing.T, cs *State, round int, vssMap map[crypto.PubKey]*validatorStub) types.BlockID { + newBlock, blockParts := createProposalBlockByOther(cs, vssMap[cs.Proposer.PubKey], round) + proposal := types.NewProposal(cs.Height, round, -1, types.BlockID{ + Hash: newBlock.Hash(), PartsHeader: blockParts.Header()}) + if err := 
vssMap[cs.Proposer.PubKey].SignProposal(config.ChainID(), proposal); err != nil { + t.Fatal("failed to sign bad proposal", err) + } + + // set the proposal block + if err := cs.SetProposalAndBlock(proposal, newBlock, blockParts, "some peer"); err != nil { + t.Fatal(err) + } + return types.BlockID{Hash: newBlock.Hash(), PartsHeader: blockParts.Header()} +} + +func TestStateFullRoundWithSelectedVoter(t *testing.T) { + cs, vss := randStateWithVoterParams(10, &types.VoterParams{ + VoterElectionThreshold: 5, + MaxTolerableByzantinePercentage: 20, + ElectionPrecision: 2}) + vss[0].Height = 1 // this is needed because of `incrementHeight(vss[1:]...)` of randStateWithVoterParams() + vssMap := makeVssMap(vss) + height, round := cs.Height, cs.Round + + voteCh := subscribeUnBuffered(cs.eventBus, types.EventQueryVote) + propCh := subscribe(cs.eventBus, types.EventQueryCompleteProposal) + newRoundCh := subscribe(cs.eventBus, types.EventQueryNewRound) + newBlockCh := subscribe(cs.eventBus, types.EventQueryNewBlock) + + startTestRound(cs, height, round) + + // height 1 + ensureNewRound(newRoundCh, height, round) + privPubKey, _ := cs.privValidator.GetPubKey() + if !cs.isProposer(privPubKey.Address()) { + blockID := proposeBlock(t, cs, round, vssMap) + ensureProposal(propCh, height, round, blockID) + } else { + ensureNewProposal(propCh, height, round) + } + + propBlock := cs.GetRoundState().ProposalBlock + voters := cs.Voters + voterPrivVals := votersPrivVals(voters, vssMap) + signAddVotes(cs, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + voterPrivVals...) + + for range voterPrivVals { + ensurePrevote(voteCh, height, round) // wait for prevote + } + + signAddVotes(cs, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + voterPrivVals...) 
+ + for range voterPrivVals { + ensurePrecommit(voteCh, height, round) // wait for precommit + } + + ensureNewBlock(newBlockCh, height) + + // height 2 + incrementHeight(vss...) + + ensureNewRound(newRoundCh, height+1, 0) + + height = cs.Height + privPubKey, _ = cs.privValidator.GetPubKey() + if !cs.isProposer(privPubKey.Address()) { + blockID := proposeBlock(t, cs, round, vssMap) + ensureProposal(propCh, height, round, blockID) + } else { + ensureNewProposal(propCh, height, round) + } + + propBlock = cs.GetRoundState().ProposalBlock + voters = cs.Voters + voterPrivVals = votersPrivVals(voters, vssMap) + + signAddVotes(cs, types.PrevoteType, propBlock.Hash(), propBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + voterPrivVals...) + + for range voterPrivVals { + ensurePrevote(voteCh, height, round) // wait for prevote + } + + signAddVotes(cs, types.PrecommitType, propBlock.Hash(), propBlock.MakePartSet(types.BlockPartSizeBytes).Header(), + voterPrivVals...) + + for range voterPrivVals { + ensurePrecommit(voteCh, height, round) // wait for precommit + } + + ensureNewBlock(newBlockCh, height) + + // we're going to roll right into new height + ensureNewRound(newRoundCh, height+1, 0) +} diff --git a/consensus/types/codec.go b/consensus/types/codec.go index e8a05b355..69ac8c4a5 100644 --- a/consensus/types/codec.go +++ b/consensus/types/codec.go @@ -2,6 +2,7 @@ package types import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/consensus/types/height_vote_set.go b/consensus/types/height_vote_set.go index 14cd04bf8..34a0d548f 100644 --- a/consensus/types/height_vote_set.go +++ b/consensus/types/height_vote_set.go @@ -36,9 +36,9 @@ We let each peer provide us with up to 2 unexpected "catchup" rounds. One for their LastCommit round, and another for the official commit round. 
*/ type HeightVoteSet struct { - chainID string - height int64 - valSet *types.ValidatorSet + chainID string + height int64 + voterSet *types.VoterSet mtx sync.Mutex round int // max tracked round @@ -46,20 +46,20 @@ type HeightVoteSet struct { peerCatchupRounds map[p2p.ID][]int // keys: peer.ID; values: at most 2 rounds } -func NewHeightVoteSet(chainID string, height int64, valSet *types.ValidatorSet) *HeightVoteSet { +func NewHeightVoteSet(chainID string, height int64, voterSet *types.VoterSet) *HeightVoteSet { hvs := &HeightVoteSet{ chainID: chainID, } - hvs.Reset(height, valSet) + hvs.Reset(height, voterSet) return hvs } -func (hvs *HeightVoteSet) Reset(height int64, valSet *types.ValidatorSet) { +func (hvs *HeightVoteSet) Reset(height int64, voterSet *types.VoterSet) { hvs.mtx.Lock() defer hvs.mtx.Unlock() hvs.height = height - hvs.valSet = valSet + hvs.voterSet = voterSet hvs.roundVoteSets = make(map[int]RoundVoteSet) hvs.peerCatchupRounds = make(map[p2p.ID][]int) @@ -100,8 +100,8 @@ func (hvs *HeightVoteSet) addRound(round int) { panic("addRound() for an existing round") } // log.Debug("addRound(round)", "round", round) - prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.valSet) - precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.valSet) + prevotes := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrevoteType, hvs.voterSet) + precommits := types.NewVoteSet(hvs.chainID, hvs.height, round, types.PrecommitType, hvs.voterSet) hvs.roundVoteSets[round] = RoundVoteSet{ Prevotes: prevotes, Precommits: precommits, diff --git a/consensus/types/height_vote_set_test.go b/consensus/types/height_vote_set_test.go index 93c73f1a1..373e28cca 100644 --- a/consensus/types/height_vote_set_test.go +++ b/consensus/types/height_vote_set_test.go @@ -20,7 +20,7 @@ func TestMain(m *testing.M) { } func TestPeerCatchupRounds(t *testing.T) { - valSet, privVals := types.RandValidatorSet(10, 1) + _, valSet, 
privVals := types.RandVoterSet(10, 1) hvs := NewHeightVoteSet(config.ChainID(), 1, valSet) @@ -54,9 +54,13 @@ func TestPeerCatchupRounds(t *testing.T) { func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivValidator, valIndex int) *types.Vote { privVal := privVals[valIndex] - addr := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + if err != nil { + panic(err) + } + vote := &types.Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, @@ -65,7 +69,7 @@ func makeVoteHR(t *testing.T, height int64, round int, privVals []types.PrivVali BlockID: types.BlockID{Hash: []byte("fakehash"), PartsHeader: types.PartSetHeader{}}, } chainID := config.ChainID() - err := privVal.SignVote(chainID, vote) + err = privVal.SignVote(chainID, vote) if err != nil { panic(fmt.Sprintf("Error signing vote: %v", err)) } diff --git a/consensus/types/round_state.go b/consensus/types/round_state.go index 35c58f698..527556f7c 100644 --- a/consensus/types/round_state.go +++ b/consensus/types/round_state.go @@ -73,6 +73,7 @@ type RoundState struct { // Subjective time when +2/3 precommits for Block at Round were found CommitTime time.Time `json:"commit_time"` Validators *types.ValidatorSet `json:"validators"` + Voters *types.VoterSet `json:"voters"` Proposer *types.Validator `json:"proposer"` Proposal *types.Proposal `json:"proposal"` ProposalBlock *types.Block `json:"proposal_block"` @@ -86,12 +87,12 @@ type RoundState struct { ValidBlock *types.Block `json:"valid_block"` // Last known block of POL mentioned above. // Last known block parts of POL metnioned above. 
- ValidBlockParts *types.PartSet `json:"valid_block_parts"` - Votes *HeightVoteSet `json:"votes"` - CommitRound int `json:"commit_round"` // - LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 - LastValidators *types.ValidatorSet `json:"last_validators"` - TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` + ValidBlockParts *types.PartSet `json:"valid_block_parts"` + Votes *HeightVoteSet `json:"votes"` + CommitRound int `json:"commit_round"` // + LastCommit *types.VoteSet `json:"last_commit"` // Last precommits at Height-1 + LastVoters *types.VoterSet `json:"last_voters"` + TriggeredTimeoutPrecommit bool `json:"triggered_timeout_precommit"` } // Compressed version of the RoundState for use in RPC @@ -113,7 +114,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { } addr := rs.Proposer.Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Voters.GetByAddress(addr) return RoundStateSimple{ HeightRoundStep: fmt.Sprintf("%d/%d/%d", rs.Height, rs.Round, rs.Step), @@ -132,7 +133,7 @@ func (rs *RoundState) RoundStateSimple() RoundStateSimple { // NewRoundEvent returns the RoundState with proposer information as an event. 
func (rs *RoundState) NewRoundEvent() types.EventDataNewRound { addr := rs.Proposer.Address - idx, _ := rs.Validators.GetByAddress(addr) + idx, _ := rs.Voters.GetByAddress(addr) return types.EventDataNewRound{ Height: rs.Height, @@ -182,7 +183,7 @@ func (rs *RoundState) StringIndented(indent string) string { %s H:%v R:%v S:%v %s StartTime: %v %s CommitTime: %v -%s Validators: %v +%s Voters: %v %s Proposer: %v %s Proposal: %v %s ProposalBlock: %v %v @@ -192,12 +193,12 @@ func (rs *RoundState) StringIndented(indent string) string { %s ValidBlock: %v %v %s Votes: %v %s LastCommit: %v -%s LastValidators:%v +%s LastVoters:%v %s}`, indent, rs.Height, rs.Round, rs.Step, indent, rs.StartTime, indent, rs.CommitTime, - indent, rs.Validators.StringIndented(indent+" "), + indent, rs.Voters.StringIndented(indent+" "), indent, rs.Proposer.String(), indent, rs.Proposal, indent, rs.ProposalBlockParts.StringShort(), rs.ProposalBlock.StringShort(), @@ -207,7 +208,7 @@ func (rs *RoundState) StringIndented(indent string) string { indent, rs.ValidBlockParts.StringShort(), rs.ValidBlock.StringShort(), indent, rs.Votes.StringIndented(indent+" "), indent, rs.LastCommit.StringShort(), - indent, rs.LastValidators.StringIndented(indent+" "), + indent, rs.LastVoters.StringIndented(indent+" "), indent) } diff --git a/consensus/types/round_state_test.go b/consensus/types/round_state_test.go index f5f5f72c0..fb44a9fbe 100644 --- a/consensus/types/round_state_test.go +++ b/consensus/types/round_state_test.go @@ -4,6 +4,7 @@ import ( "testing" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" tmrand "github.com/tendermint/tendermint/libs/rand" @@ -16,7 +17,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { // Random validators nval, ntxs := 100, 100 - vset, _ := types.RandValidatorSet(nval, 1) + _, vset, _ := types.RandVoterSet(nval, 1) commitSigs := make([]types.CommitSig, nval) blockID := 
types.BlockID{ Hash: tmrand.Bytes(tmhash.Size), @@ -46,7 +47,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { LastBlockID: blockID, LastCommitHash: tmrand.Bytes(20), DataHash: tmrand.Bytes(20), - ValidatorsHash: tmrand.Bytes(20), + VotersHash: tmrand.Bytes(20), ConsensusHash: tmrand.Bytes(20), AppHash: tmrand.Bytes(20), LastResultsHash: tmrand.Bytes(20), @@ -71,7 +72,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { rs := &RoundState{ StartTime: tmtime.Now(), CommitTime: tmtime.Now(), - Validators: vset, + Voters: vset, Proposal: proposal, ProposalBlock: block, ProposalBlockParts: parts, @@ -81,7 +82,7 @@ func BenchmarkRoundStateDeepCopy(b *testing.B) { ValidBlockParts: parts, Votes: nil, // TODO LastCommit: nil, // TODO - LastValidators: vset, + LastVoters: vset, } b.StartTimer() diff --git a/consensus/wal.go b/consensus/wal.go index 989a5dc29..7b09ffa2d 100644 --- a/consensus/wal.go +++ b/consensus/wal.go @@ -11,6 +11,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + auto "github.com/tendermint/tendermint/libs/autofile" "github.com/tendermint/tendermint/libs/log" tmos "github.com/tendermint/tendermint/libs/os" diff --git a/consensus/wal_generator.go b/consensus/wal_generator.go index 244edd536..422c3f73b 100644 --- a/consensus/wal_generator.go +++ b/consensus/wal_generator.go @@ -11,6 +11,8 @@ import ( "github.com/pkg/errors" + db "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/libs/log" @@ -21,7 +23,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/store" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) // WALGenerateNBlocks generates a consensus WAL. 
It does this by spinning up a diff --git a/crypto/ed25519/ed25519_test.go b/crypto/ed25519/ed25519_test.go index 503050274..6fe2c0946 100644 --- a/crypto/ed25519/ed25519_test.go +++ b/crypto/ed25519/ed25519_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" ) diff --git a/crypto/encoding/amino/amino.go b/crypto/encoding/amino/amino.go index b63eb738d..f7a2dde77 100644 --- a/crypto/encoding/amino/amino.go +++ b/crypto/encoding/amino/amino.go @@ -4,6 +4,7 @@ import ( "reflect" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/multisig" diff --git a/crypto/encoding/amino/encode_test.go b/crypto/encoding/amino/encode_test.go index 67a7566dd..edc54292f 100644 --- a/crypto/encoding/amino/encode_test.go +++ b/crypto/encoding/amino/encode_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/multisig" diff --git a/crypto/merkle/simple_map.go b/crypto/merkle/simple_map.go index 36434f67f..840bebd51 100644 --- a/crypto/merkle/simple_map.go +++ b/crypto/merkle/simple_map.go @@ -4,6 +4,7 @@ import ( "bytes" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto/tmhash" "github.com/tendermint/tendermint/libs/kv" ) diff --git a/crypto/merkle/simple_proof.go b/crypto/merkle/simple_proof.go index 660bf236f..44b97f606 100644 --- a/crypto/merkle/simple_proof.go +++ b/crypto/merkle/simple_proof.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" ) diff --git 
a/crypto/multisig/bitarray/compact_bit_array_test.go b/crypto/multisig/bitarray/compact_bit_array_test.go index ba0949178..f086dc877 100644 --- a/crypto/multisig/bitarray/compact_bit_array_test.go +++ b/crypto/multisig/bitarray/compact_bit_array_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/crypto/multisig/codec.go b/crypto/multisig/codec.go index 3a5869398..cc1e12f92 100644 --- a/crypto/multisig/codec.go +++ b/crypto/multisig/codec.go @@ -2,6 +2,7 @@ package multisig import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" diff --git a/crypto/secp256k1/secp256k1.go b/crypto/secp256k1/secp256k1.go index 26dcead59..5338d10a5 100644 --- a/crypto/secp256k1/secp256k1.go +++ b/crypto/secp256k1/secp256k1.go @@ -12,6 +12,7 @@ import ( "golang.org/x/crypto/ripemd160" // nolint: staticcheck // necessary for Bitcoin address format amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" ) diff --git a/crypto/secp256k1/secp256k1_cgo_test.go b/crypto/secp256k1/secp256k1_cgo_test.go index edb207b53..96b026bc9 100644 --- a/crypto/secp256k1/secp256k1_cgo_test.go +++ b/crypto/secp256k1/secp256k1_cgo_test.go @@ -3,9 +3,10 @@ package secp256k1 import ( - "github.com/magiconair/properties/assert" "testing" + "github.com/magiconair/properties/assert" + "github.com/stretchr/testify/require" ) diff --git a/crypto/sr25519/codec.go b/crypto/sr25519/codec.go index c3e6bd646..f33b616f9 100644 --- a/crypto/sr25519/codec.go +++ b/crypto/sr25519/codec.go @@ -2,6 +2,7 @@ package sr25519 import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" ) diff --git a/crypto/tmhash/hash_test.go b/crypto/tmhash/hash_test.go index 
89a779801..57fd0faa5 100644 --- a/crypto/tmhash/hash_test.go +++ b/crypto/tmhash/hash_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/tmhash" ) diff --git a/docs/.vuepress/styles/index.styl b/docs/.vuepress/styles/index.styl index 0b40a6f9b..0ca835191 100644 --- a/docs/.vuepress/styles/index.styl +++ b/docs/.vuepress/styles/index.styl @@ -1,3 +1,3 @@ :root - --accent-color #00BB00 + --accent-color #018A01 --background #222222 \ No newline at end of file diff --git a/docs/architecture/README.md b/docs/architecture/README.md index 9f754fd37..7519951d9 100644 --- a/docs/architecture/README.md +++ b/docs/architecture/README.md @@ -72,3 +72,4 @@ Note the context/background should be written in the present tense. - [ADR-052-Tendermint-Mode](./adr-052-tendermint-mode.md) - [ADR-053-State-Sync-Prototype](./adr-053-state-sync-prototype.md) - [ADR-054-crypto-encoding-2](./adr-054-crypto-encoding-2.md) +- [ADR-055-protobuf-design](./adr-055-protobuf-design.md) diff --git a/docs/architecture/adr-046-light-client-implementation.md b/docs/architecture/adr-046-light-client-implementation.md index 37a7c83c5..7620409a0 100644 --- a/docs/architecture/adr-046-light-client-implementation.md +++ b/docs/architecture/adr-046-light-client-implementation.md @@ -4,6 +4,7 @@ * 13-02-2020: Initial draft * 26-02-2020: Cross-checking the first header * 28-02-2020: Bisection algorithm details +* 31-03-2020: Verify signature got changed ## Context @@ -60,8 +61,9 @@ also cross-checked with witnesses for additional security. Due to bisection algorithm nature, some headers might be skipped. If the light client does not have a header for height `X` and `VerifyHeaderAtHeight(X)` or -`VerifyHeader(H#X)` methods are called, it will perform a backwards -verification from the latest header back to the header at height `X`. 
+`VerifyHeader(H#X)` methods are called, these will perform either a) backwards +verification from the latest header back to the header at height `X` or b) +bisection verification from the first stored header to the header at height `X`. `TrustedHeader`, `TrustedValidatorSet` only communicate with the trusted store. If some header is not there, an error will be returned indicating that @@ -99,6 +101,10 @@ type Store interface { FirstSignedHeaderHeight() (int64, error) SignedHeaderAfter(height int64) (*types.SignedHeader, error) + + Prune(size uint16) error + + Size() uint16 } ``` @@ -109,12 +115,13 @@ database, used in Tendermint). In the future, remote adapters are possible ```go func Verify( chainID string, - h1 *types.SignedHeader, - h1NextVals *types.ValidatorSet, - h2 *types.SignedHeader, - h2Vals *types.ValidatorSet, + trustedHeader *types.SignedHeader, // height=X + trustedVals *types.ValidatorSet, // height=X or height=X+1 + untrustedHeader *types.SignedHeader, // height=Y + untrustedVals *types.ValidatorSet, // height=Y trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { ``` @@ -123,6 +130,9 @@ cases of adjacent and non-adjacent headers. In the former case, it compares the hashes directly (2/3+ signed transition). Otherwise, it verifies 1/3+ (`trustLevel`) of trusted validators are still present in new validators. +While `Verify` function is certainly handy, `VerifyAdjacent` and +`VerifyNonAdjacent` should be used most often to avoid logic errors. 
+ ### Bisection algorithm details Non-recursive bisection algorithm was implemented despite the spec containing diff --git a/docs/architecture/adr-053-state-sync-prototype.md b/docs/architecture/adr-053-state-sync-prototype.md index 79e39b24d..2848f9dd4 100644 --- a/docs/architecture/adr-053-state-sync-prototype.md +++ b/docs/architecture/adr-053-state-sync-prototype.md @@ -14,6 +14,14 @@ This ADR outlines the plan for an initial state sync prototype, and is subject t * Added experimental prototype info. * Added open questions and implementation plan. +* 2020-03-29: Strengthened and simplified ABCI interface (Erik Grinaker) + * ABCI: replaced `chunks` with `chunk_hashes` in `Snapshot`. + * ABCI: removed `SnapshotChunk` message. + * ABCI: renamed `GetSnapshotChunk` to `LoadSnapshotChunk`. + * ABCI: chunks are now exchanged simply as `bytes`. + * ABCI: chunks are now 0-indexed, for parity with `chunk_hashes` array. + * Reduced maximum chunk size to 16 MB, and increased snapshot message size to 4 MB. + ## Context State sync will allow a new node to receive a snapshot of the application state without downloading blocks or going through consensus. This bootstraps the node significantly faster than the current fast sync system, which replays all historical blocks. @@ -36,28 +44,18 @@ This describes the snapshot/restore process seen from Tendermint. The interface ### Snapshot Data Structure -A node can have multiple snapshots taken at various heights. Snapshots can be taken in different application-specified formats (e.g. MessagePack as format `1` and Protobuf as format `2`, or similarly for schema versioning). Each snapshot consists of multiple chunks containing the actual state data, allowing parallel downloads and reduced memory usage. +A node can have multiple snapshots taken at various heights. Snapshots can be taken in different application-specified formats (e.g. MessagePack as format `1` and Protobuf as format `2`, or similarly for schema versioning). 
Each snapshot consists of multiple chunks containing the actual state data, for parallel downloads and reduced memory usage. ```proto message Snapshot { - uint64 height = 1; // The height at which the snapshot was taken - uint32 format = 2; // The application-specific snapshot format - uint32 chunks = 3; // The number of chunks in the snapshot - bytes metadata = 4; // Arbitrary application metadata -} - -message SnapshotChunk { - uint64 height = 1; // The height of the corresponding snapshot - uint32 format = 2; // The application-specific snapshot format - uint32 chunk = 3; // The chunk index (one-based) - bytes data = 4; // Serialized application state in an arbitrary format - bytes checksum = 5; // SHA-1 checksum of data + uint64 height = 1; // The height at which the snapshot was taken + uint32 format = 2; // The application-specific snapshot format + repeated bytes chunk_hashes = 3; // SHA-256 checksums of all chunks, in order + bytes metadata = 4; // Arbitrary application metadata } ``` -Chunk verification data must be encoded along with the state data in the `data` field. - -Chunk `data` cannot be larger than 64 MB, and snapshot `metadata` cannot be larger than 64 KB. +Chunks are exchanged simply as `bytes`, and cannot be larger than 16 MB. `Snapshot` messages should be less than 4 MB. 
### ABCI Interface @@ -72,41 +70,43 @@ message ResponseListSnapshots { // Offers a snapshot to the application message RequestOfferSnapshot { Snapshot snapshot = 1; - bytes app_hash = 2; + bytes app_hash = 2; } message ResponseOfferSnapshot { - bool accepted = 1; - Reason reason = 2; // Reason why snapshot was rejected - enum Reason { - unknown = 0; // Unknown or generic reason - invalid_height = 1; // Height is rejected: avoid this height - invalid_format = 2; // Format is rejected: avoid this format + bool accepted = 1; + Reason reason = 2; + + enum Reason { // Reason why snapshot was rejected + unknown = 0; // Unknown or generic reason + invalid_height = 1; // Height is rejected: avoid this height + invalid_format = 2; // Format is rejected: avoid this format } } -// Fetches a snapshot chunk -message RequestGetSnapshotChunk { +// Loads a snapshot chunk +message RequestLoadSnapshotChunk { uint64 height = 1; uint32 format = 2; - uint32 chunk = 3; + uint32 chunk = 3; // Zero-indexed } -message ResponseGetSnapshotChunk { - SnapshotChunk chunk = 1; +message ResponseLoadSnapshotChunk { + bytes chunk = 1; } // Applies a snapshot chunk message RequestApplySnapshotChunk { - SnapshotChunk chunk = 1; + bytes chunk = 1; } message ResponseApplySnapshotChunk { - bool applied = 1; - Reason reason = 2; // Reason why chunk failed - enum Reason { - unknown = 0; // Unknown or generic reason - verify_failed = 1; // Chunk verification failed + bool applied = 1; + Reason reason = 2; // Reason why chunk failed + + enum Reason { // Reason why chunk failed + unknown = 0; // Unknown or generic reason + verify_failed = 1; // Snapshot verification failed } } ``` @@ -139,19 +139,19 @@ When starting an empty node with state sync and fast sync enabled, snapshots are 3. The node contacts a set of full nodes, and verifies the trusted block header using the given hash via the light client. -4. The node requests available snapshots via `RequestListSnapshots`. 
Snapshots with `metadata` greater than 64 KB are rejected. +4. The node requests available snapshots via P2P from peers, via `RequestListSnapshots`. Peers will return the 10 most recent snapshots, one message per snapshot. -5. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: +5. The node aggregates snapshots from multiple peers, ordered by height and format (in reverse). If there are `chunk_hashes` mismatches between different snapshots, the one hosted by the largest amount of peers is chosen. The node iterates over all snapshots in reverse order by height and format until it finds one that satisfies all of the following conditions: * The snapshot height's block is considered trustworthy by the light client (i.e. snapshot height is greater than trusted header and within unbonding period of the latest trustworthy block). - * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOffsetSnapshot` call (via `invalid_height` or `invalid_format`). + * The snapshot's height or format hasn't been explicitly rejected by an earlier `RequestOfferSnapshot` call (via `invalid_height` or `invalid_format`). * The application accepts the `RequestOfferSnapshot` call. -6. The node downloads chunks in parallel from multiple peers via `RequestGetSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunks with `data` greater than 64 MB are rejected. +6. The node downloads chunks in parallel from multiple peers, via `RequestLoadSnapshotChunk`, and both the sender and receiver verifies their checksums. Chunk messages cannot exceed 16 MB. -7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`, along with the chain's app hash at the snapshot height for verification. If the chunk is rejected the node should retry it. If it was rejected with `verify_failed`, it should be refetched from a different source. 
If an internal error occurred, `ResponseException` should be returned and state sync should be aborted. +7. The node passes chunks sequentially to the app via `RequestApplySnapshotChunk`. 8. Once all chunks have been applied, the node compares the app hash to the chain app hash, and if they do not match it either errors or discards the state and starts over. @@ -167,7 +167,7 @@ This describes the snapshot process seen from Gaia, using format version `1`. Th In the initial version there is no snapshot metadata, so it is set to an empty byte buffer. -Once all chunks have been successfully built, snapshot metadata should be serialized and stored in the file system as e.g. `snapshots///metadata`, and served via `RequestListSnapshots`. +Once all chunks have been successfully built, snapshot metadata should be stored in a database and served via `RequestListSnapshots`. ### Snapshot Chunk Format @@ -181,7 +181,7 @@ For the initial prototype, each chunk consists of a complete dump of all node da For a production version, it should be sufficient to store key/value/version for all nodes (leaf and inner) in insertion order, chunked in some appropriate way. If per-chunk verification is required, the chunk must also contain enough information to reconstruct the Merkle proofs all the way up to the root of the multistore, e.g. by storing a complete subtree's key/value/version data plus Merkle hashes of all other branches up to the multistore root. The exact approach will depend on tradeoffs between size, time, and verification. IAVL RangeProofs are not recommended, since these include redundant data such as proofs for intermediate and leaf nodes that can be derived from the above data. -Chunks should be built greedily by collecting node data up to some size limit (e.g. 32 MB) and serializing it. Chunk data is stored in the file system as `snapshots////data`, along with a SHA-1 checksum in `snapshots////checksum`, and served via `RequestGetSnapshotChunk`. 
+Chunks should be built greedily by collecting node data up to some size limit (e.g. 10 MB) and serializing it. Chunk data is stored in the file system as `snapshots///`, and a SHA-256 checksum is stored along with the snapshot metadata. ### Snapshot Scheduling @@ -223,12 +223,6 @@ To stop the testnet, run: $ ./tools/stop.sh ``` -## Open Questions - -* Should we have a simpler scheme for discovering snapshots? E.g. announce supported formats, and have peer supply latest available snapshot. - - Downsides: app has to announce supported formats, having a single snapshot per peer may make fewer peers available for chosen snapshot. - ## Resolved Questions * Is it OK for state-synced nodes to not have historical blocks nor historical IAVL versions? @@ -309,6 +303,8 @@ $ ./tools/stop.sh * **Tendermint:** node should go back to fast-syncing when lagging significantly [#129](https://github.com/tendermint/tendermint/issues/129) +* **Tendermint:** backfill historical blocks [#4629](https://github.com/tendermint/tendermint/issues/4629) + ## Status Accepted diff --git a/docs/architecture/adr-054-crypto-encoding-2.md b/docs/architecture/adr-054-crypto-encoding-2.md index 9ec05f229..1e3691a68 100644 --- a/docs/architecture/adr-054-crypto-encoding-2.md +++ b/docs/architecture/adr-054-crypto-encoding-2.md @@ -2,7 +2,8 @@ ## Changelog -\*2020-2-27: Created +2020-2-27: Created +2020-4-16: Update ## Context @@ -12,7 +13,8 @@ Currently amino encodes keys as ` `. ## Decision -When using the `oneof` protobuf type there are many times where one will have to manually switch over the possible messages and then pass them to the interface which is needed. By transitioning from a fixed size byte array (`[size]byte`) to byte slice's (`[]byte`) then this would enable the usage of the [cosmos-proto's](hhttps://github.com/regen-network/cosmos-proto#interface_type) interface type, which will generate these switch statements. 
+Previously Tendermint defined all the key types for use in Tendermint and the Cosmos-SDK. Going forward the Cosmos-SDK will define its own protobuf type for keys. This will allow Tendermint to only define the keys that are being used in the codebase (ed25519). +There is the the opportunity to only define the usage of ed25519 (`bytes`) and not have it be a `oneof`, but this would mean that the `oneof` work is only being postponed to a later date. When using the `oneof` protobuf type we will have to manually switch over the possible key types and then pass them to the interface which is needed. The approach that will be taken to minimize headaches for users is one where all encoding of keys will shift to protobuf and where amino encoding is relied on, there will be custom marshal and unmarshal functions. @@ -20,27 +22,13 @@ Protobuf messages: ```proto message PubKey { - option (cosmos_proto.interface_type) = "*github.com/tendermint/tendermint/crypto.PubKey"; oneof key { - bytes ed25519 = 1 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/ed25519.PubKey"]; - bytes secp256k1 = 2 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/secp256k1.PubKey"]; - bytes sr25519 = 3 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/sr25519.PubKey"]; - PubKeyMultiSigThreshold multisig = 4 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/multisig.PubKeyMultisigThreshold"];; + bytes ed25519 = 1; } message PrivKey { - option (cosmos_proto.interface_type) = "github.com/tendermint/tendermint/crypto.PrivKey"; oneof sum { - bytes ed25519 = 1 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/ed25519.PrivKey"]; - bytes secp256k1 = 2 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/secp256k1.PrivKey"]; - bytes sr25519 = 3 - [(gogoproto.casttype) = "github.com/tendermint/tendermint/crypto/sr25519.PrivKey"];; + bytes ed25519 = 1; } } ``` diff --git 
a/docs/architecture/adr-055-protobuf-design.md b/docs/architecture/adr-055-protobuf-design.md new file mode 100644 index 000000000..5500fc2d8 --- /dev/null +++ b/docs/architecture/adr-055-protobuf-design.md @@ -0,0 +1,60 @@ +# ADR 055: Protobuf Design + +## Changelog + +- 2020-4-15: Created (@marbar3778) + +## Context + +Currently we use [go-amino](https://github.com/tendermint/go-amino) throughout Tendermint. Amino is not being maintained anymore (April 15, 2020) by the Tendermint team and has been found to have issues: + +- https://github.com/tendermint/go-amino/issues/286 +- https://github.com/tendermint/go-amino/issues/230 +- https://github.com/tendermint/go-amino/issues/121 + +These are a few of the known issues that users could run into. + +Amino enables quick prototyping and development of features. While this is nice, amino does not provide the performance and developer convenience that is expected. For Tendermint to see wider adoption as a BFT protocol engine a transition to an adopted encoding format is needed. Below are some possible options that can be explored. + +There are a few options to pick from: + +- `Protobuf`: Protocol buffers are Google's language-neutral, platform-neutral, extensible mechanism for serializing structured data – think XML, but smaller, faster, and simpler. It is supported in countless languages and has been proven in production for many years. + +- `FlatBuffers`: FlatBuffers is an efficient cross platform serialization library. Flatbuffers are more efficient than Protobuf due to the fact that there is no parsing/unpacking to a second representation. FlatBuffers has been tested and used in production but is not widely adopted. + +- `CapnProto`: Cap’n Proto is an insanely fast data interchange format and capability-based RPC system. Cap'n Proto does not have an encoding/decoding step. It has not seen wide adoption throughout the industry.
+ +- @erikgrinaker - https://github.com/tendermint/tendermint/pull/4623#discussion_r401163501 + ``` + Cap'n'Proto is awesome. It was written by one of the original Protobuf developers to fix some of its issues, and supports e.g. random access to process huge messages without loading them into memory and an (opt-in) canonical form which would be very useful when determinism is needed (e.g. in the state machine). That said, I suspect Protobuf is the better choice due to wider adoption, although it makes me kind of sad since Cap'n'Proto is technically better. + ``` + +## Decision + +Transition Tendermint to Protobuf because of its performance and tooling. The ecosystem behind Protobuf is vast and has outstanding [support for many languages](https://developers.google.com/protocol-buffers/docs/tutorials). + +We will be making this possible by keeping the current types in their current form (handwritten) and creating a `/proto` directory in which all the `.proto` files will live. Where encoding is needed, on disk and over the wire, we will call util functions that will transition the types from handwritten go types to protobuf generated types. + +By going with this design we will enable future changes to types and allow for a more modular codebase. + +## Status + +Proposed + +## Consequences + +### Positive + +- Allows for modular types in the future +- Less refactoring +- Allows the proto files to be pulled into the spec repo in the future.
+- Performance +- Tooling & support in multiple languages + +### Negative + +- When a developer is updating a type they need to make sure to update the proto type as well + +### Neutral + +## References diff --git a/docs/architecture/adr-056-proving-amnesia-attacks.md b/docs/architecture/adr-056-proving-amnesia-attacks.md new file mode 100644 index 000000000..f0200ca7d --- /dev/null +++ b/docs/architecture/adr-056-proving-amnesia-attacks.md @@ -0,0 +1,120 @@ +# ADR 056: Proving amnesia attacks + +## Changelog + +- 02.04.20: Initial Draft +- 06.04.20: Second Draft + +## Context + +Whilst most created evidence of malicious behaviour is self evident such that any individual can verify them independently there are types of evidence, known collectively as global evidence, that require further collaboration from the network in order to accumulate enough information to create evidence that is individually verifiable and can therefore be processed through consensus. [Fork Accountability](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md) has been coined to describe the entire process of detection, proving and punishing of malicious behaviour. This ADR addresses specifically how to prove an amnesia attack but also generally outlines how global evidence can be converted to individual evidence. + +### Amnesia Attack + +The currently only known form of global evidence stems from [flip flopping](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md#flip-flopping) attacks. The schematic below explains one scenario where an amnesia attack, a form of flip flopping, can occur such that two sets of honest nodes, C1 and C2, commit different blocks. + +![](../imgs/tm-amnesia-attack.png) + +1. C1 and F send PREVOTE messages for block A. +2. C1 sends PRECOMMIT for round 1 for block A. +3. A new round is started, C2 and F send PREVOTE messages for a different block B. +4. 
C2 and F then send PRECOMMIT messages for block B. +5. F breaks the lock and goes back and sends PRECOMMIT messages in round 1 for block A. + + +This creates a fork on the main chain. Back to the past, another form of flip flopping, creates a light fork (capable of fooling those not involved in consensus), in a similar way, with F taking the precommits from C1 and forging a commit from them. + +## Decision + +As the distinction between these two attacks (amnesia and back to the past) can only be distinguished by confirming with all validators (to see if it is a full fork or a light fork), for the purpose of simplicity, these attacks will be treated as the same. + +Currently, the evidence reactor is used to simply broadcast and store evidence. Instead of perhaps creating a new reactor for the specific task of verifying these attacks, the current evidence reactor will be extended. + +The process begins with a light client receiving conflicting headers (in the future this could also be a full node during fast sync), which it sends to a full node to analyse. As part of [evidence handling](https://github.com/tendermint/tendermint/blob/master/docs/architecture/adr-047-handling-evidence-from-light-client.md), this could be deduced into potential amnesia evidence + +```golang +type PotentialAmnesiaEvidence struct { + V1 []*types.Vote + V2 []*types.Vote + + timestamp time.Time +} +``` + +*NOTE: Unlike prior evidence types, `PotentialAmnesiaEvidence` and `AmnesiaEvidence` are processed as a batch instead + of individually. This will require changes to much of the API.* + + *NOTE: `PotentialAmnesiaEvidence` could be constructed for when 1/3 or less vote in two different rounds but as it is not currently detected nor can it cause a fork, it will be ignored.* + +The evidence should contain the precommit votes for the intersection of validators that voted for both rounds. 
The votes should be all valid and the height and time that the infringement was made should be within: + +`MaxEvidenceAge - Amnesia trial period` + +where `Amnesia trial period` is a configurable duration defaulted at 1 day. + +With reference to the honest nodes, C1 and C2, in the schematic, C2 will not PRECOMMIT an earlier round, but it is likely, if a node in C1 were to receive +2/3 PREVOTE's or PRECOMMIT's for a higher round, that it would remove the lock and PREVOTE and PRECOMMIT for the later round. Therefore, unfortunately it is not a case of simply punishing all nodes that have double voted in the `PotentialAmnesiaEvidence`. + +Instead we use the Proof of Lock Change (PoLC) referred to in the [consensus spec](https://github.com/tendermint/spec/blob/master/spec/consensus/consensus.md#terms). When an honest node votes again for a different block in a later round +(which will only occur in very rare cases), it will generate the PoLC and store it in the evidence reactor for a time equal to the `MaxEvidenceAge` + +```golang +type ProofOfLockChange struct { + Votes []*types.Vote +} +``` + +This can be either evidence of +2/3 PREVOTES or PRECOMMITS (either warrants the honest node the right to vote) and is valid, among other checks, so long as the PRECOMMIT vote of the node in V2 came after all the votes in the `ProofOfLockChange` i.e. it received +2/3 votes for a block and then voted for that block thereafter (F is unable to prove this). + +In the event that an honest node receives `PotentialAmnesiaEvidence` it will first `Verify()` it and then will check if it is among the suspected nodes in the evidence. 
If so, it will retrieve the `ProofOfLockChange` and combine it with `PotentialAmnesiaEvidence` to form `AmnesiaEvidence`: + +```golang +type AmnesiaEvidence struct { + Evidence *types.PotentialAmnesiaEvidence + Proofs []*types.ProofOfLockChange +} +``` + +If the node is not required to submit any proof then it will simply broadcast the `PotentialAmnesiaEvidence`. + +When a node has successfully validated `PotentialAmnesiaEvidence` it timestamps it and refuses to receive the same form of `PotentialAmnesiaEvidence`. If a node receives `AmnesiaEvidence` it checks it against any current `AmnesiaEvidence` it might have and if so merges the two by adding the proofs, if it doesn't have it yet it runs `Verify()` and stores it. + +There can only be one `AmnesiaEvidence` and one `PotentialAmnesiaEvidence` stored for each attack (i.e. for each height). + +When, `time.Now() > PotentialAmnesiaEvidence.timestamp + AmnesiaTrialPeriod`, honest validators of the current validator set can begin proposing the block that contains the `AmnesiaEvidence`. + +*NOTE: Even before the evidence is proposed and committed, the off-chain process of gossiping valid evidence could be + enough for honest nodes to recognize the fork and halt.* + +Other validators will vote if: + +- The Amnesia Evidence is not valid +- The Amnesia Evidence is not within the validators trial period i.e. too soon. +- The Amnesia Evidence is of the same height but is different to the Amnesia Evidence that they have. i.e. is missing proofs. + (In this case, the validator will try again to gossip the latest Amnesia Evidence that it has) +- Is of an AmnesiaEvidence that has already been committed to the chain.
+ + +## Status + +Proposed + +## Consequences + +### Positive + +Increasing fork detection makes the system more secure + +### Negative + +Non-responsive but honest nodes that are part of the suspect group that don't produce a proof will be punished + +A delay between the detection of a fork and the punishment of one + +### Neutral + +Evidence package will need to be able to handle batch evidence as well as individual evidence (i.e. extra work) + +## References + +- [Fork accountability algorithm](https://docs.google.com/document/d/11ZhMsCj3y7zIZz4udO9l25xqb0kl7gmWqNpGVRzOeyY/edit) +- [Fork accountability spec](https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/accountability.md) diff --git a/docs/imgs/light_client_bisection_alg.png b/docs/imgs/light_client_bisection_alg.png new file mode 100644 index 000000000..2a12c7542 Binary files /dev/null and b/docs/imgs/light_client_bisection_alg.png differ diff --git a/docs/imgs/tm-amnesia-attack.png b/docs/imgs/tm-amnesia-attack.png new file mode 100644 index 000000000..7e084b273 Binary files /dev/null and b/docs/imgs/tm-amnesia-attack.png differ diff --git a/docs/package-lock.json b/docs/package-lock.json index a5e8c0880..c3f277d83 100644 --- a/docs/package-lock.json +++ b/docs/package-lock.json @@ -12,22 +12,40 @@ "@babel/highlight": "^7.8.3" } }, + "@babel/compat-data": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/compat-data/-/compat-data-7.9.0.tgz", + "integrity": "sha512-zeFQrr+284Ekvd9e7KAX954LkapWiOmQtsfHirhxqfdlX6MEC32iRE+pqUGlYIBchdevaCwvzxWGSy/YBNI85g==", + "requires": { + "browserslist": "^4.9.1", + "invariant": "^2.2.4", + "semver": "^5.5.0" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } + } + }, "@babel/core": { - "version": "7.8.4", - "resolved": 
"https://registry.npmjs.org/@babel/core/-/core-7.8.4.tgz", - "integrity": "sha512-0LiLrB2PwrVI+a2/IEskBopDYSd8BCb3rOvH7D5tzoWd696TBEduBvuLVm4Nx6rltrLZqvI3MCalB2K2aVzQjA==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/core/-/core-7.9.0.tgz", + "integrity": "sha512-kWc7L0fw1xwvI0zi8OKVBuxRVefwGOrKSQMvrQ3dW+bIIavBY3/NpXmpjMy7bQnLgwgzWQZ8TlM57YHpHNHz4w==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.8.4", - "@babel/helpers": "^7.8.4", - "@babel/parser": "^7.8.4", - "@babel/template": "^7.8.3", - "@babel/traverse": "^7.8.4", - "@babel/types": "^7.8.3", + "@babel/generator": "^7.9.0", + "@babel/helper-module-transforms": "^7.9.0", + "@babel/helpers": "^7.9.0", + "@babel/parser": "^7.9.0", + "@babel/template": "^7.8.6", + "@babel/traverse": "^7.9.0", + "@babel/types": "^7.9.0", "convert-source-map": "^1.7.0", "debug": "^4.1.0", "gensync": "^1.0.0-beta.1", - "json5": "^2.1.0", + "json5": "^2.1.2", "lodash": "^4.17.13", "resolve": "^1.3.2", "semver": "^5.4.1", @@ -43,11 +61,11 @@ } }, "json5": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.1.tgz", - "integrity": "sha512-l+3HXD0GEI3huGq1njuqtzYK8OYJyXMkOLtQ53pjWh89tvWS2h6l+1zMkYWqlb57+SiQodKZyvMEFb2X+KrFhQ==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/json5/-/json5-2.1.3.tgz", + "integrity": "sha512-KXPvOm8K9IJKFM0bmdn8QXh7udDh1g/giieX0NLCaMnb4hEiVFqnop2ImTXCc5e0/oHz3LTqmHGtExn5hfMkOA==", "requires": { - "minimist": "^1.2.0" + "minimist": "^1.2.5" } }, "ms": { @@ -68,11 +86,11 @@ } }, "@babel/generator": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.8.4.tgz", - "integrity": "sha512-PwhclGdRpNAf3IxZb0YVuITPZmmrXz9zf6fH8lT4XbrmfQKr6ryBzhv593P5C6poJRciFCL/eHGW2NuGrgEyxA==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/generator/-/generator-7.9.5.tgz", + "integrity": 
"sha512-GbNIxVB3ZJe3tLeDm1HSn2AhuD/mVcyLDpgtLXa5tplmWrJdF/elxB56XNqCuD6szyNkDi6wuoKXln3QeBmCHQ==", "requires": { - "@babel/types": "^7.8.3", + "@babel/types": "^7.9.5", "jsesc": "^2.5.1", "lodash": "^4.17.13", "source-map": "^0.5.0" @@ -102,36 +120,46 @@ "@babel/types": "^7.8.3" } }, - "@babel/helper-call-delegate": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-call-delegate/-/helper-call-delegate-7.8.3.tgz", - "integrity": "sha512-6Q05px0Eb+N4/GTyKPPvnkig7Lylw+QzihMpws9iiZQv7ZImf84ZsZpQH7QoWN4n4tm81SnSzPgHw2qtO0Zf3A==", + "@babel/helper-compilation-targets": { + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/helper-compilation-targets/-/helper-compilation-targets-7.8.7.tgz", + "integrity": "sha512-4mWm8DCK2LugIS+p1yArqvG1Pf162upsIsjE7cNBjez+NjliQpVhj20obE520nao0o14DaTnFJv+Fw5a0JpoUw==", "requires": { - "@babel/helper-hoist-variables": "^7.8.3", - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/compat-data": "^7.8.6", + "browserslist": "^4.9.1", + "invariant": "^2.2.4", + "levenary": "^1.1.1", + "semver": "^5.5.0" + }, + "dependencies": { + "semver": { + "version": "5.7.1", + "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.1.tgz", + "integrity": "sha512-sauaDf/PZdVgrLTNYHRtpXa1iRiKcaebiKQ1BJdpQlWH2lCvexQdX55snPFyK7QzpudqbCI0qXFfOasHdyNDGQ==" + } } }, "@babel/helper-create-class-features-plugin": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.8.3.tgz", - "integrity": "sha512-qmp4pD7zeTxsv0JNecSBsEmG1ei2MqwJq4YQcK3ZWm/0t07QstWfvuV/vm3Qt5xNMFETn2SZqpMx2MQzbtq+KA==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/helper-create-class-features-plugin/-/helper-create-class-features-plugin-7.9.5.tgz", + "integrity": "sha512-IipaxGaQmW4TfWoXdqjY0TzoXQ1HRS0kPpEgvjosb3u7Uedcq297xFqDQiCcQtRRwzIMif+N1MLVI8C5a4/PAA==", "requires": { - "@babel/helper-function-name": 
"^7.8.3", + "@babel/helper-function-name": "^7.9.5", "@babel/helper-member-expression-to-functions": "^7.8.3", "@babel/helper-optimise-call-expression": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-split-export-declaration": "^7.8.3" } }, "@babel/helper-create-regexp-features-plugin": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.3.tgz", - "integrity": "sha512-Gcsm1OHCUr9o9TcJln57xhWHtdXbA2pgQ58S0Lxlks0WMGNXuki4+GLfX0p+L2ZkINUGZvfkz8rzoqJQSthI+Q==", + "version": "7.8.8", + "resolved": "https://registry.npmjs.org/@babel/helper-create-regexp-features-plugin/-/helper-create-regexp-features-plugin-7.8.8.tgz", + "integrity": "sha512-LYVPdwkrQEiX9+1R29Ld/wTrmQu1SSKYnuOk3g0CkcZMA1p0gsNxJFj/3gBdaJ7Cg0Fnek5z0DsMULePP7Lrqg==", "requires": { + "@babel/helper-annotate-as-pure": "^7.8.3", "@babel/helper-regex": "^7.8.3", - "regexpu-core": "^4.6.0" + "regexpu-core": "^4.7.0" } }, "@babel/helper-define-map": { @@ -154,13 +182,13 @@ } }, "@babel/helper-function-name": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.8.3.tgz", - "integrity": "sha512-BCxgX1BC2hD/oBlIFUgOCQDOPV8nSINxCwM3o93xP4P9Fq6aV5sgv2cOOITDMtCfQ+3PvHp3l689XZvAM9QyOA==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/helper-function-name/-/helper-function-name-7.9.5.tgz", + "integrity": "sha512-JVcQZeXM59Cd1qanDUxv9fgJpt3NeKUaqBqUEvfmQ+BCOKq2xUgaWZW2hr0dkbyJgezYuplEoh5knmrnS68efw==", "requires": { "@babel/helper-get-function-arity": "^7.8.3", "@babel/template": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/types": "^7.9.5" } }, "@babel/helper-get-function-arity": { @@ -196,15 +224,16 @@ } }, "@babel/helper-module-transforms": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.8.3.tgz", - "integrity": "sha512-C7NG6B7vfBa/pwCOshpMbOYUmrYQDfCpVL/JCRu0ek8B5p8kue1+BCXpg2vOYs7w5ACB9GTOBYQ5U6NwrMg+3Q==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/helper-module-transforms/-/helper-module-transforms-7.9.0.tgz", + "integrity": "sha512-0FvKyu0gpPfIQ8EkxlrAydOWROdHpBmiCiRwLkUiBGhCUPRRbVD2/tm3sFr/c/GWFrQ/ffutGUAnx7V0FzT2wA==", "requires": { "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-simple-access": "^7.8.3", "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/template": "^7.8.3", - "@babel/types": "^7.8.3", + "@babel/template": "^7.8.6", + "@babel/types": "^7.9.0", "lodash": "^4.17.13" } }, @@ -242,14 +271,14 @@ } }, "@babel/helper-replace-supers": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.3.tgz", - "integrity": "sha512-xOUssL6ho41U81etpLoT2RTdvdus4VfHamCuAm4AHxGr+0it5fnwoVdwUJ7GFEqCsQYzJUhcbsN9wB9apcYKFA==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/helper-replace-supers/-/helper-replace-supers-7.8.6.tgz", + "integrity": "sha512-PeMArdA4Sv/Wf4zXwBKPqVj7n9UF/xg6slNRtZW84FM7JpE1CbG8B612FyM4cxrf4fMAMGO0kR7voy1ForHHFA==", "requires": { "@babel/helper-member-expression-to-functions": "^7.8.3", "@babel/helper-optimise-call-expression": "^7.8.3", - "@babel/traverse": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/traverse": "^7.8.6", + "@babel/types": "^7.8.6" } }, "@babel/helper-simple-access": { @@ -269,6 +298,11 @@ "@babel/types": "^7.8.3" } }, + "@babel/helper-validator-identifier": { + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/helper-validator-identifier/-/helper-validator-identifier-7.9.5.tgz", + "integrity": "sha512-/8arLKUFq882w4tWGj9JYzRpAlZgiWUJ+dtteNTDqrRBz9Iguck9Rn3ykuBDoUwh2TO4tSAJlrxDUOXWklJe4g==" + }, 
"@babel/helper-wrap-function": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/helper-wrap-function/-/helper-wrap-function-7.8.3.tgz", @@ -281,29 +315,29 @@ } }, "@babel/helpers": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.8.4.tgz", - "integrity": "sha512-VPbe7wcQ4chu4TDQjimHv/5tj73qz88o12EPkO2ValS2QiQS/1F2SsjyIGNnAD0vF/nZS6Cf9i+vW6HIlnaR8w==", + "version": "7.9.2", + "resolved": "https://registry.npmjs.org/@babel/helpers/-/helpers-7.9.2.tgz", + "integrity": "sha512-JwLvzlXVPjO8eU9c/wF9/zOIN7X6h8DYf7mG4CiFRZRvZNKEF5dQ3H3V+ASkHoIB3mWhatgl5ONhyqHRI6MppA==", "requires": { "@babel/template": "^7.8.3", - "@babel/traverse": "^7.8.4", - "@babel/types": "^7.8.3" + "@babel/traverse": "^7.9.0", + "@babel/types": "^7.9.0" } }, "@babel/highlight": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.8.3.tgz", - "integrity": "sha512-PX4y5xQUvy0fnEVHrYOarRPXVWafSjTW9T0Hab8gVIawpl2Sj0ORyrygANq+KjcNlSSTw0YCLSNA8OyZ1I4yEg==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/highlight/-/highlight-7.9.0.tgz", + "integrity": "sha512-lJZPilxX7Op3Nv/2cvFdnlepPXDxi29wxteT57Q965oc5R9v86ztx0jfxVrTcBk8C2kcPkkDa2Z4T3ZsPPVWsQ==", "requires": { + "@babel/helper-validator-identifier": "^7.9.0", "chalk": "^2.0.0", - "esutils": "^2.0.2", "js-tokens": "^4.0.0" } }, "@babel/parser": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.8.4.tgz", - "integrity": "sha512-0fKu/QqildpXmPVaRBoXOlyBb3MC+J0A66x97qEfLOMkn3u6nfY5esWogQwi/K0BjASYy4DbnsEWnpNL6qT5Mw==" + "version": "7.9.4", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.9.4.tgz", + "integrity": "sha512-bC49otXX6N0/VYhgOMh4gnP26E9xnDZK3TmbNpxYzzz9BQLBosQwfyOe9/cXUU3txYhTzLCbcqd5c8y/OmCjHA==" }, "@babel/plugin-proposal-async-generator-functions": { "version": "7.8.3", @@ -334,6 +368,15 @@ "@babel/plugin-syntax-decorators": "^7.8.3" } }, + 
"@babel/plugin-proposal-dynamic-import": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-dynamic-import/-/plugin-proposal-dynamic-import-7.8.3.tgz", + "integrity": "sha512-NyaBbyLFXFLT9FP+zk0kYlUlA8XtCUbehs67F0nnEg7KICgMc2mNkIeu9TYhKzyXMkrapZFwAhXLdnt4IYHy1w==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.0" + } + }, "@babel/plugin-proposal-json-strings": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-json-strings/-/plugin-proposal-json-strings-7.8.3.tgz", @@ -343,13 +386,32 @@ "@babel/plugin-syntax-json-strings": "^7.8.0" } }, - "@babel/plugin-proposal-object-rest-spread": { + "@babel/plugin-proposal-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-nullish-coalescing-operator/-/plugin-proposal-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-TS9MlfzXpXKt6YYomudb/KU7nQI6/xnapG6in1uZxoxDghuSMZsPb6D2fyUwNYSAp4l1iR7QtFOjkqcRYcUsfw==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0" + } + }, + "@babel/plugin-proposal-numeric-separator": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.8.3.tgz", - "integrity": "sha512-8qvuPwU/xxUCt78HocNlv0mXXo0wdh9VT1R04WU8HGOfaOob26pF+9P5/lYjN/q7DHOX1bvX60hnhOvuQUJdbA==", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-numeric-separator/-/plugin-proposal-numeric-separator-7.8.3.tgz", + "integrity": "sha512-jWioO1s6R/R+wEHizfaScNsAx+xKgwTLNXSh7tTC4Usj3ItsPEhYkEpU4h+lpnBwq7NBVOJXfO6cRFYcX69JUQ==", "requires": { "@babel/helper-plugin-utils": "^7.8.3", - "@babel/plugin-syntax-object-rest-spread": "^7.8.0" + "@babel/plugin-syntax-numeric-separator": "^7.8.3" + } + }, + "@babel/plugin-proposal-object-rest-spread": { + "version": "7.9.5", 
+ "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-object-rest-spread/-/plugin-proposal-object-rest-spread-7.9.5.tgz", + "integrity": "sha512-VP2oXvAf7KCYTthbUHwBlewbl1Iq059f6seJGsxMizaCdgHIeczOr7FBqELhSqfkIl04Fi8okzWzl63UKbQmmg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + "@babel/plugin-transform-parameters": "^7.9.5" } }, "@babel/plugin-proposal-optional-catch-binding": { @@ -361,12 +423,21 @@ "@babel/plugin-syntax-optional-catch-binding": "^7.8.0" } }, + "@babel/plugin-proposal-optional-chaining": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-optional-chaining/-/plugin-proposal-optional-chaining-7.9.0.tgz", + "integrity": "sha512-NDn5tu3tcv4W30jNhmc2hyD5c56G6cXx4TesJubhxrJeCvuuMpttxr0OnNCqbZGhFjLrg+NIhxxC+BK5F6yS3w==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-syntax-optional-chaining": "^7.8.0" + } + }, "@babel/plugin-proposal-unicode-property-regex": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.3.tgz", - "integrity": "sha512-1/1/rEZv2XGweRwwSkLpY+s60za9OZ1hJs4YDqFHCw0kYWYwL5IFljVY1MYBL+weT1l9pokDO2uhSTLVxzoHkQ==", + "version": "7.8.8", + "resolved": "https://registry.npmjs.org/@babel/plugin-proposal-unicode-property-regex/-/plugin-proposal-unicode-property-regex-7.8.8.tgz", + "integrity": "sha512-EVhjVsMpbhLw9ZfHWSx2iy13Q8Z/eg8e8ccVWt23sWQK5l1UdkoLJPN5w69UA4uITGBnEZD2JOe4QOHycYKv8A==", "requires": { - "@babel/helper-create-regexp-features-plugin": "^7.8.3", + "@babel/helper-create-regexp-features-plugin": "^7.8.8", "@babel/helper-plugin-utils": "^7.8.3" } }, @@ -410,6 +481,22 @@ "@babel/helper-plugin-utils": "^7.8.3" } }, + "@babel/plugin-syntax-nullish-coalescing-operator": { + "version": "7.8.3", + "resolved": 
"https://registry.npmjs.org/@babel/plugin-syntax-nullish-coalescing-operator/-/plugin-syntax-nullish-coalescing-operator-7.8.3.tgz", + "integrity": "sha512-aSff4zPII1u2QD7y+F8oDsz19ew4IGEJg9SVW+bqwpwtfFleiQDMdzA/R+UlWDzfnHFCxxleFT0PMIrR36XLNQ==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-numeric-separator": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-numeric-separator/-/plugin-syntax-numeric-separator-7.8.3.tgz", + "integrity": "sha512-H7dCMAdN83PcCmqmkHB5dtp+Xa9a6LKSvA2hiFBC/5alSHxM5VgWZXFqDi0YFe8XNGT6iCa+z4V4zSt/PdZ7Dw==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-syntax-object-rest-spread": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-object-rest-spread/-/plugin-syntax-object-rest-spread-7.8.3.tgz", @@ -426,6 +513,22 @@ "@babel/helper-plugin-utils": "^7.8.0" } }, + "@babel/plugin-syntax-optional-chaining": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-optional-chaining/-/plugin-syntax-optional-chaining-7.8.3.tgz", + "integrity": "sha512-KoK9ErH1MBlCPxV0VANkXW2/dw4vlbGDrFgz8bmUsBGYkFRcbRwMh6cIJubdPrkxRwuGdtCk0v/wPTKbQgBjkg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.0" + } + }, + "@babel/plugin-syntax-top-level-await": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-syntax-top-level-await/-/plugin-syntax-top-level-await-7.8.3.tgz", + "integrity": "sha512-kwj1j9lL/6Wd0hROD3b/OZZ7MSrZLqqn9RAZ5+cYYsflQ9HZBIKCUkr3+uL1MEJ1NePiUbf98jjiMQSv0NMR9g==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-transform-arrow-functions": { "version": "7.8.3", "resolved": "https://registry.npmjs.org/@babel/plugin-transform-arrow-functions/-/plugin-transform-arrow-functions-7.8.3.tgz", @@ -462,16 +565,16 @@ } }, "@babel/plugin-transform-classes": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.8.3.tgz", - "integrity": "sha512-SjT0cwFJ+7Rbr1vQsvphAHwUHvSUPmMjMU/0P59G8U2HLFqSa082JO7zkbDNWs9kH/IUqpHI6xWNesGf8haF1w==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-classes/-/plugin-transform-classes-7.9.5.tgz", + "integrity": "sha512-x2kZoIuLC//O5iA7PEvecB105o7TLzZo8ofBVhP79N+DO3jaX+KYfww9TQcfBEZD0nikNyYcGB1IKtRq36rdmg==", "requires": { "@babel/helper-annotate-as-pure": "^7.8.3", "@babel/helper-define-map": "^7.8.3", - "@babel/helper-function-name": "^7.8.3", + "@babel/helper-function-name": "^7.9.5", "@babel/helper-optimise-call-expression": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3", - "@babel/helper-replace-supers": "^7.8.3", + "@babel/helper-replace-supers": "^7.8.6", "@babel/helper-split-export-declaration": "^7.8.3", "globals": "^11.1.0" } @@ -485,9 +588,9 @@ } }, "@babel/plugin-transform-destructuring": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.8.3.tgz", - "integrity": "sha512-H4X646nCkiEcHZUZaRkhE2XVsoz0J/1x3VVujnn96pSoGCtKPA99ZZA+va+gK+92Zycd6OBKCD8tDb/731bhgQ==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-destructuring/-/plugin-transform-destructuring-7.9.5.tgz", + "integrity": "sha512-j3OEsGel8nHL/iusv/mRd5fYZ3DrOxWC82x0ogmdN/vHfAP4MYw+AFKYanzWlktNwikKvlzUV//afBW5FTp17Q==", "requires": { "@babel/helper-plugin-utils": "^7.8.3" } @@ -519,9 +622,9 @@ } }, "@babel/plugin-transform-for-of": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.8.4.tgz", - "integrity": "sha512-iAXNlOWvcYUYoV8YIxwS7TxGRJcxyl8eQCfT+A5j8sKUzRFvJdcyjp97jL2IghWSRDaL2PU2O2tX8Cu9dTBq5A==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-for-of/-/plugin-transform-for-of-7.9.0.tgz", + "integrity": 
"sha512-lTAnWOpMwOXpyDx06N+ywmF3jNbafZEqZ96CGYabxHrxNX8l5ny7dt4bK/rGwAh9utyP2b2Hv7PlZh1AAS54FQ==", "requires": { "@babel/helper-plugin-utils": "^7.8.3" } @@ -543,44 +646,52 @@ "@babel/helper-plugin-utils": "^7.8.3" } }, - "@babel/plugin-transform-modules-amd": { + "@babel/plugin-transform-member-expression-literals": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.8.3.tgz", - "integrity": "sha512-MadJiU3rLKclzT5kBH4yxdry96odTUwuqrZM+GllFI/VhxfPz+k9MshJM+MwhfkCdxxclSbSBbUGciBngR+kEQ==", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-member-expression-literals/-/plugin-transform-member-expression-literals-7.8.3.tgz", + "integrity": "sha512-3Wk2EXhnw+rP+IDkK6BdtPKsUE5IeZ6QOGrPYvw52NwBStw9V1ZVzxgK6fSKSxqUvH9eQPR3tm3cOq79HlsKYA==", "requires": { - "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3" + } + }, + "@babel/plugin-transform-modules-amd": { + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-amd/-/plugin-transform-modules-amd-7.9.0.tgz", + "integrity": "sha512-vZgDDF003B14O8zJy0XXLnPH4sg+9X5hFBBGN1V+B2rgrB+J2xIypSN6Rk9imB2hSTHQi5OHLrFWsZab1GMk+Q==", + "requires": { + "@babel/helper-module-transforms": "^7.9.0", "@babel/helper-plugin-utils": "^7.8.3", "babel-plugin-dynamic-import-node": "^2.3.0" } }, "@babel/plugin-transform-modules-commonjs": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.8.3.tgz", - "integrity": "sha512-JpdMEfA15HZ/1gNuB9XEDlZM1h/gF/YOH7zaZzQu2xCFRfwc01NXBMHHSTT6hRjlXJJs5x/bfODM3LiCk94Sxg==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-commonjs/-/plugin-transform-modules-commonjs-7.9.0.tgz", + "integrity": "sha512-qzlCrLnKqio4SlgJ6FMMLBe4bySNis8DFn1VkGmOcxG9gqEyPIOzeQrA//u0HAKrWpJlpZbZMPB1n/OPa4+n8g==", "requires": { 
- "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-module-transforms": "^7.9.0", "@babel/helper-plugin-utils": "^7.8.3", "@babel/helper-simple-access": "^7.8.3", "babel-plugin-dynamic-import-node": "^2.3.0" } }, "@babel/plugin-transform-modules-systemjs": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.8.3.tgz", - "integrity": "sha512-8cESMCJjmArMYqa9AO5YuMEkE4ds28tMpZcGZB/jl3n0ZzlsxOAi3mC+SKypTfT8gjMupCnd3YiXCkMjj2jfOg==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-systemjs/-/plugin-transform-modules-systemjs-7.9.0.tgz", + "integrity": "sha512-FsiAv/nao/ud2ZWy4wFacoLOm5uxl0ExSQ7ErvP7jpoihLR6Cq90ilOFyX9UXct3rbtKsAiZ9kFt5XGfPe/5SQ==", "requires": { "@babel/helper-hoist-variables": "^7.8.3", - "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-module-transforms": "^7.9.0", "@babel/helper-plugin-utils": "^7.8.3", "babel-plugin-dynamic-import-node": "^2.3.0" } }, "@babel/plugin-transform-modules-umd": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.8.3.tgz", - "integrity": "sha512-evhTyWhbwbI3/U6dZAnx/ePoV7H6OUG+OjiJFHmhr9FPn0VShjwC2kdxqIuQ/+1P50TMrneGzMeyMTFOjKSnAw==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-modules-umd/-/plugin-transform-modules-umd-7.9.0.tgz", + "integrity": "sha512-uTWkXkIVtg/JGRSIABdBoMsoIeoHQHPTL0Y2E7xf5Oj7sLqwVsNXOkNk0VJc7vF0IMBsPeikHxFjGe+qmwPtTQ==", "requires": { - "@babel/helper-module-transforms": "^7.8.3", + "@babel/helper-module-transforms": "^7.9.0", "@babel/helper-plugin-utils": "^7.8.3" } }, @@ -610,27 +721,42 @@ } }, "@babel/plugin-transform-parameters": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.8.4.tgz", - "integrity": 
"sha512-IsS3oTxeTsZlE5KqzTbcC2sV0P9pXdec53SU+Yxv7o/6dvGM5AkTotQKhoSffhNgZ/dftsSiOoxy7evCYJXzVA==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-parameters/-/plugin-transform-parameters-7.9.5.tgz", + "integrity": "sha512-0+1FhHnMfj6lIIhVvS4KGQJeuhe1GI//h5uptK4PvLt+BGBxsoUJbd3/IW002yk//6sZPlFgsG1hY6OHLcy6kA==", "requires": { - "@babel/helper-call-delegate": "^7.8.3", "@babel/helper-get-function-arity": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3" } }, + "@babel/plugin-transform-property-literals": { + "version": "7.8.3", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-property-literals/-/plugin-transform-property-literals-7.8.3.tgz", + "integrity": "sha512-uGiiXAZMqEoQhRWMK17VospMZh5sXWg+dlh2soffpkAl96KAm+WZuJfa6lcELotSRmooLqg0MWdH6UUq85nmmg==", + "requires": { + "@babel/helper-plugin-utils": "^7.8.3" + } + }, "@babel/plugin-transform-regenerator": { + "version": "7.8.7", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.7.tgz", + "integrity": "sha512-TIg+gAl4Z0a3WmD3mbYSk+J9ZUH6n/Yc57rtKRnlA/7rcCvpekHXe0CMZHP1gYp7/KLe9GHTuIba0vXmls6drA==", + "requires": { + "regenerator-transform": "^0.14.2" + } + }, + "@babel/plugin-transform-reserved-words": { "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/plugin-transform-regenerator/-/plugin-transform-regenerator-7.8.3.tgz", - "integrity": "sha512-qt/kcur/FxrQrzFR432FGZznkVAjiyFtCOANjkAKwCbt465L6ZCiUQh2oMYGU3Wo8LRFJxNDFwWn106S5wVUNA==", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-reserved-words/-/plugin-transform-reserved-words-7.8.3.tgz", + "integrity": "sha512-mwMxcycN3omKFDjDQUl+8zyMsBfjRFr0Zn/64I41pmjv4NJuqcYlEtezwYtw9TFd9WR1vN5kiM+O0gMZzO6L0A==", "requires": { - "regenerator-transform": "^0.14.0" + "@babel/helper-plugin-utils": "^7.8.3" } }, "@babel/plugin-transform-runtime": { - "version": "7.8.3", - "resolved": 
"https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.8.3.tgz", - "integrity": "sha512-/vqUt5Yh+cgPZXXjmaG9NT8aVfThKk7G4OqkVhrXqwsC5soMn/qTCxs36rZ2QFhpfTJcjw4SNDIZ4RUb8OL4jQ==", + "version": "7.9.0", + "resolved": "https://registry.npmjs.org/@babel/plugin-transform-runtime/-/plugin-transform-runtime-7.9.0.tgz", + "integrity": "sha512-pUu9VSf3kI1OqbWINQ7MaugnitRss1z533436waNXp+0N3ur3zfut37sXiQMxkuCF4VUjwZucen/quskCh7NHw==", "requires": { "@babel/helper-module-imports": "^7.8.3", "@babel/helper-plugin-utils": "^7.8.3", @@ -697,53 +823,70 @@ } }, "@babel/preset-env": { - "version": "7.3.4", - "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.3.4.tgz", - "integrity": "sha512-2mwqfYMK8weA0g0uBKOt4FE3iEodiHy9/CW0b+nWXcbL+pGzLx8ESYc+j9IIxr6LTDHWKgPm71i9smo02bw+gA==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/preset-env/-/preset-env-7.9.5.tgz", + "integrity": "sha512-eWGYeADTlPJH+wq1F0wNfPbVS1w1wtmMJiYk55Td5Yu28AsdR9AsC97sZ0Qq8fHqQuslVSIYSGJMcblr345GfQ==", "requires": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/helper-plugin-utils": "^7.0.0", - "@babel/plugin-proposal-async-generator-functions": "^7.2.0", - "@babel/plugin-proposal-json-strings": "^7.2.0", - "@babel/plugin-proposal-object-rest-spread": "^7.3.4", - "@babel/plugin-proposal-optional-catch-binding": "^7.2.0", - "@babel/plugin-proposal-unicode-property-regex": "^7.2.0", - "@babel/plugin-syntax-async-generators": "^7.2.0", - "@babel/plugin-syntax-json-strings": "^7.2.0", - "@babel/plugin-syntax-object-rest-spread": "^7.2.0", - "@babel/plugin-syntax-optional-catch-binding": "^7.2.0", - "@babel/plugin-transform-arrow-functions": "^7.2.0", - "@babel/plugin-transform-async-to-generator": "^7.3.4", - "@babel/plugin-transform-block-scoped-functions": "^7.2.0", - "@babel/plugin-transform-block-scoping": "^7.3.4", - "@babel/plugin-transform-classes": "^7.3.4", - "@babel/plugin-transform-computed-properties": "^7.2.0", - 
"@babel/plugin-transform-destructuring": "^7.2.0", - "@babel/plugin-transform-dotall-regex": "^7.2.0", - "@babel/plugin-transform-duplicate-keys": "^7.2.0", - "@babel/plugin-transform-exponentiation-operator": "^7.2.0", - "@babel/plugin-transform-for-of": "^7.2.0", - "@babel/plugin-transform-function-name": "^7.2.0", - "@babel/plugin-transform-literals": "^7.2.0", - "@babel/plugin-transform-modules-amd": "^7.2.0", - "@babel/plugin-transform-modules-commonjs": "^7.2.0", - "@babel/plugin-transform-modules-systemjs": "^7.3.4", - "@babel/plugin-transform-modules-umd": "^7.2.0", - "@babel/plugin-transform-named-capturing-groups-regex": "^7.3.0", - "@babel/plugin-transform-new-target": "^7.0.0", - "@babel/plugin-transform-object-super": "^7.2.0", - "@babel/plugin-transform-parameters": "^7.2.0", - "@babel/plugin-transform-regenerator": "^7.3.4", - "@babel/plugin-transform-shorthand-properties": "^7.2.0", - "@babel/plugin-transform-spread": "^7.2.0", - "@babel/plugin-transform-sticky-regex": "^7.2.0", - "@babel/plugin-transform-template-literals": "^7.2.0", - "@babel/plugin-transform-typeof-symbol": "^7.2.0", - "@babel/plugin-transform-unicode-regex": "^7.2.0", - "browserslist": "^4.3.4", + "@babel/compat-data": "^7.9.0", + "@babel/helper-compilation-targets": "^7.8.7", + "@babel/helper-module-imports": "^7.8.3", + "@babel/helper-plugin-utils": "^7.8.3", + "@babel/plugin-proposal-async-generator-functions": "^7.8.3", + "@babel/plugin-proposal-dynamic-import": "^7.8.3", + "@babel/plugin-proposal-json-strings": "^7.8.3", + "@babel/plugin-proposal-nullish-coalescing-operator": "^7.8.3", + "@babel/plugin-proposal-numeric-separator": "^7.8.3", + "@babel/plugin-proposal-object-rest-spread": "^7.9.5", + "@babel/plugin-proposal-optional-catch-binding": "^7.8.3", + "@babel/plugin-proposal-optional-chaining": "^7.9.0", + "@babel/plugin-proposal-unicode-property-regex": "^7.8.3", + "@babel/plugin-syntax-async-generators": "^7.8.0", + "@babel/plugin-syntax-dynamic-import": "^7.8.0", 
+ "@babel/plugin-syntax-json-strings": "^7.8.0", + "@babel/plugin-syntax-nullish-coalescing-operator": "^7.8.0", + "@babel/plugin-syntax-numeric-separator": "^7.8.0", + "@babel/plugin-syntax-object-rest-spread": "^7.8.0", + "@babel/plugin-syntax-optional-catch-binding": "^7.8.0", + "@babel/plugin-syntax-optional-chaining": "^7.8.0", + "@babel/plugin-syntax-top-level-await": "^7.8.3", + "@babel/plugin-transform-arrow-functions": "^7.8.3", + "@babel/plugin-transform-async-to-generator": "^7.8.3", + "@babel/plugin-transform-block-scoped-functions": "^7.8.3", + "@babel/plugin-transform-block-scoping": "^7.8.3", + "@babel/plugin-transform-classes": "^7.9.5", + "@babel/plugin-transform-computed-properties": "^7.8.3", + "@babel/plugin-transform-destructuring": "^7.9.5", + "@babel/plugin-transform-dotall-regex": "^7.8.3", + "@babel/plugin-transform-duplicate-keys": "^7.8.3", + "@babel/plugin-transform-exponentiation-operator": "^7.8.3", + "@babel/plugin-transform-for-of": "^7.9.0", + "@babel/plugin-transform-function-name": "^7.8.3", + "@babel/plugin-transform-literals": "^7.8.3", + "@babel/plugin-transform-member-expression-literals": "^7.8.3", + "@babel/plugin-transform-modules-amd": "^7.9.0", + "@babel/plugin-transform-modules-commonjs": "^7.9.0", + "@babel/plugin-transform-modules-systemjs": "^7.9.0", + "@babel/plugin-transform-modules-umd": "^7.9.0", + "@babel/plugin-transform-named-capturing-groups-regex": "^7.8.3", + "@babel/plugin-transform-new-target": "^7.8.3", + "@babel/plugin-transform-object-super": "^7.8.3", + "@babel/plugin-transform-parameters": "^7.9.5", + "@babel/plugin-transform-property-literals": "^7.8.3", + "@babel/plugin-transform-regenerator": "^7.8.7", + "@babel/plugin-transform-reserved-words": "^7.8.3", + "@babel/plugin-transform-shorthand-properties": "^7.8.3", + "@babel/plugin-transform-spread": "^7.8.3", + "@babel/plugin-transform-sticky-regex": "^7.8.3", + "@babel/plugin-transform-template-literals": "^7.8.3", + 
"@babel/plugin-transform-typeof-symbol": "^7.8.4", + "@babel/plugin-transform-unicode-regex": "^7.8.3", + "@babel/preset-modules": "^0.1.3", + "@babel/types": "^7.9.5", + "browserslist": "^4.9.1", + "core-js-compat": "^3.6.2", "invariant": "^2.2.2", - "js-levenshtein": "^1.1.3", - "semver": "^5.3.0" + "levenary": "^1.1.1", + "semver": "^5.5.0" }, "dependencies": { "semver": { @@ -753,58 +896,54 @@ } } }, - "@babel/runtime": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.8.4.tgz", - "integrity": "sha512-neAp3zt80trRVBI1x0azq6c57aNBqYZH8KhMm3TaB7wEI5Q4A2SHfBHE8w9gOhI/lrqxtEbXZgQIrHP+wvSGwQ==", + "@babel/preset-modules": { + "version": "0.1.3", + "resolved": "https://registry.npmjs.org/@babel/preset-modules/-/preset-modules-0.1.3.tgz", + "integrity": "sha512-Ra3JXOHBq2xd56xSF7lMKXdjBn3T772Y1Wet3yWnkDly9zHvJki029tAFzvAAK5cf4YV3yoxuP61crYRol6SVg==", "requires": { - "regenerator-runtime": "^0.13.2" - }, - "dependencies": { - "regenerator-runtime": { - "version": "0.13.3", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz", - "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==" - } + "@babel/helper-plugin-utils": "^7.0.0", + "@babel/plugin-proposal-unicode-property-regex": "^7.4.4", + "@babel/plugin-transform-dotall-regex": "^7.4.4", + "@babel/types": "^7.4.4", + "esutils": "^2.0.2" } }, - "@babel/runtime-corejs2": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/runtime-corejs2/-/runtime-corejs2-7.8.4.tgz", - "integrity": "sha512-7jU2FgNqNHX6yTuU/Dr/vH5/O8eVL9U85MG5aDw1LzGfCvvhXC1shdXfVzCQDsoY967yrAKeLujRv7l8BU+dZA==", + "@babel/runtime": { + "version": "7.9.2", + "resolved": "https://registry.npmjs.org/@babel/runtime/-/runtime-7.9.2.tgz", + "integrity": "sha512-NE2DtOdufG7R5vnfQUTehdTfNycfUANEtCa9PssN9O/xmTzP4E08UI797ixaei6hBEVL9BI/PsdJS5x7mWoB9Q==", "requires": { - "core-js": "^2.6.5", - 
"regenerator-runtime": "^0.13.2" + "regenerator-runtime": "^0.13.4" }, "dependencies": { "regenerator-runtime": { - "version": "0.13.3", - "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.3.tgz", - "integrity": "sha512-naKIZz2GQ8JWh///G7L3X6LaQUAMp2lvb1rvwwsURe/VXwD6VMfr+/1NuNw3ag8v2kY1aQ/go5SNn79O9JU7yw==" + "version": "0.13.5", + "resolved": "https://registry.npmjs.org/regenerator-runtime/-/regenerator-runtime-0.13.5.tgz", + "integrity": "sha512-ZS5w8CpKFinUzOwW3c83oPeVXoNsrLsaCoLtJvAClH135j/R77RuymhiSErhm2lKcwSCIpmvIWSbDkIfAqKQlA==" } } }, "@babel/template": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.3.tgz", - "integrity": "sha512-04m87AcQgAFdvuoyiQ2kgELr2tV8B4fP/xJAVUL3Yb3bkNdMedD3d0rlSQr3PegP0cms3eHjl1F7PWlvWbU8FQ==", + "version": "7.8.6", + "resolved": "https://registry.npmjs.org/@babel/template/-/template-7.8.6.tgz", + "integrity": "sha512-zbMsPMy/v0PWFZEhQJ66bqjhH+z0JgMoBWuikXybgG3Gkd/3t5oQ1Rw2WQhnSrsOmsKXnZOx15tkC4qON/+JPg==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/parser": "^7.8.3", - "@babel/types": "^7.8.3" + "@babel/parser": "^7.8.6", + "@babel/types": "^7.8.6" } }, "@babel/traverse": { - "version": "7.8.4", - "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.8.4.tgz", - "integrity": "sha512-NGLJPZwnVEyBPLI+bl9y9aSnxMhsKz42so7ApAv9D+b4vAFPpY013FTS9LdKxcABoIYFU52HcYga1pPlx454mg==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/traverse/-/traverse-7.9.5.tgz", + "integrity": "sha512-c4gH3jsvSuGUezlP6rzSJ6jf8fYjLj3hsMZRx/nX0h+fmHN0w+ekubRrHPqnMec0meycA2nwCsJ7dC8IPem2FQ==", "requires": { "@babel/code-frame": "^7.8.3", - "@babel/generator": "^7.8.4", - "@babel/helper-function-name": "^7.8.3", + "@babel/generator": "^7.9.5", + "@babel/helper-function-name": "^7.9.5", "@babel/helper-split-export-declaration": "^7.8.3", - "@babel/parser": "^7.8.4", - "@babel/types": "^7.8.3", + "@babel/parser": "^7.9.0", + 
"@babel/types": "^7.9.5", "debug": "^4.1.0", "globals": "^11.1.0", "lodash": "^4.17.13" @@ -826,11 +965,11 @@ } }, "@babel/types": { - "version": "7.8.3", - "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.8.3.tgz", - "integrity": "sha512-jBD+G8+LWpMBBWvVcdr4QysjUE4mU/syrhN17o1u3gx0/WzJB1kwiVZAXRtWbsIPOwW8pF/YJV5+nmetPzepXg==", + "version": "7.9.5", + "resolved": "https://registry.npmjs.org/@babel/types/-/types-7.9.5.tgz", + "integrity": "sha512-XjnvNqenk818r5zMaba+sLQjnbda31UfUURv3ei0qPQw4u+j2jMyJ5b11y8ZHYTRSI3NnInQkkkRT4fLqqPdHg==", "requires": { - "esutils": "^2.0.2", + "@babel/helper-validator-identifier": "^7.9.5", "lodash": "^4.17.13", "to-fast-properties": "^2.0.0" }, @@ -843,10 +982,15 @@ } }, "@cosmos-ui/vue": { - "version": "0.5.21", - "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.5.21.tgz", - "integrity": "sha512-Y60AMxFKgHrgE/EHxnGKaTcYUN1nJa5m3SylhsCe/d0AvzF9RSYGSPwVgDxmW4KiufBKXkv4PmiNG9WDNWwdxw==", + "version": "0.22.0", + "resolved": "https://registry.npmjs.org/@cosmos-ui/vue/-/vue-0.22.0.tgz", + "integrity": "sha512-+1A6SNohzHKI64EsPP3N4spcWalGsnwRUB4y6ySBHkHlQ5X4KjsSkHOQ95xODMlwtKELiDSVjS8PsgdEyk+4Vg==", "requires": { + "axios": "^0.19.2", + "clipboard-copy": "^3.1.0", + "js-base64": "^2.5.2", + "prismjs": "^1.19.0", + "querystring": "^0.2.0", "tiny-cookie": "^2.3.1", "vue": "^2.6.10" } @@ -917,9 +1061,9 @@ "integrity": "sha512-tHq6qdbT9U1IRSGf14CL0pUlULksvY9OZ+5eEgl1N7t+OA3tGvNpxJCzuKQlsNgCVwbAs670L1vcVQi8j9HjnA==" }, "@types/node": { - "version": "13.7.0", - "resolved": "https://registry.npmjs.org/@types/node/-/node-13.7.0.tgz", - "integrity": "sha512-GnZbirvmqZUzMgkFn70c74OQpTTUcCzlhQliTzYjQMqg+hVKcDnxdL19Ne3UdYzdMA/+W3eb646FWn/ZaT1NfQ==" + "version": "13.11.0", + "resolved": "https://registry.npmjs.org/@types/node/-/node-13.11.0.tgz", + "integrity": "sha512-uM4mnmsIIPK/yeO+42F2RQhGUIs39K2RFmugcJANppXe6J1nvH87PvzPZYpza7Xhhs8Yn9yIAVdLZ84z61+0xQ==" }, "@types/q": { "version": "1.5.2", @@ -945,23 +1089,31 @@ } }, 
"@vue/babel-preset-app": { - "version": "3.12.1", - "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-3.12.1.tgz", - "integrity": "sha512-Zjy5jQaikV1Pz+ri0YgXFS7q4/5wCxB5tRkDOEIt5+4105u0Feb/pvH20nVL6nx9GyXrECFfcm7Yxr/z++OaPQ==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/@vue/babel-preset-app/-/babel-preset-app-4.3.1.tgz", + "integrity": "sha512-iNkySkbRWXGUA+Cvzj+/gEP0Y0uVAwwzfn21S7hkggSeIg9LJyZ+QzdxgKO0wgi01yTdb2mYWgeLQAfHZ65aew==", "requires": { - "@babel/helper-module-imports": "^7.0.0", - "@babel/plugin-proposal-class-properties": "^7.0.0", - "@babel/plugin-proposal-decorators": "^7.1.0", - "@babel/plugin-syntax-dynamic-import": "^7.0.0", - "@babel/plugin-syntax-jsx": "^7.0.0", - "@babel/plugin-transform-runtime": "^7.4.0", - "@babel/preset-env": "^7.0.0 < 7.4.0", - "@babel/runtime": "^7.0.0", - "@babel/runtime-corejs2": "^7.2.0", - "@vue/babel-preset-jsx": "^1.0.0", - "babel-plugin-dynamic-import-node": "^2.2.0", - "babel-plugin-module-resolver": "3.2.0", - "core-js": "^2.6.5" + "@babel/core": "^7.9.0", + "@babel/helper-compilation-targets": "^7.8.7", + "@babel/helper-module-imports": "^7.8.3", + "@babel/plugin-proposal-class-properties": "^7.8.3", + "@babel/plugin-proposal-decorators": "^7.8.3", + "@babel/plugin-syntax-dynamic-import": "^7.8.3", + "@babel/plugin-syntax-jsx": "^7.8.3", + "@babel/plugin-transform-runtime": "^7.9.0", + "@babel/preset-env": "^7.9.0", + "@babel/runtime": "^7.9.2", + "@vue/babel-preset-jsx": "^1.1.2", + "babel-plugin-dynamic-import-node": "^2.3.0", + "core-js": "^3.6.4", + "core-js-compat": "^3.6.4" + }, + "dependencies": { + "core-js": { + "version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", + "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + } } }, "@vue/babel-preset-jsx": { @@ -1031,9 +1183,9 @@ } }, "@vue/component-compiler-utils": { - "version": "3.1.1", - "resolved": 
"https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.1.1.tgz", - "integrity": "sha512-+lN3nsfJJDGMNz7fCpcoYIORrXo0K3OTsdr8jCM7FuqdI4+70TY6gxY6viJ2Xi1clqyPg7LpeOWwjF31vSMmUw==", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/@vue/component-compiler-utils/-/component-compiler-utils-3.1.2.tgz", + "integrity": "sha512-QLq9z8m79mCinpaEeSURhnNCN6djxpHw0lpP/bodMlt5kALfONpryMthvnrQOlTcIKoF+VoPi+lPHUYeDFPXug==", "requires": { "consolidate": "^0.15.1", "hash-sum": "^1.0.2", @@ -1063,23 +1215,24 @@ } }, "@vuepress/core": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.3.0.tgz", - "integrity": "sha512-/KaH10ggZeEnwh/i8A02VtGHfuIfTEf/pIPV9BBVjK5M6ToPhF2pkcXlPk5PbCWam2dKm7ZDQddJzev1dY5TNA==", - "requires": { - "@babel/core": "^7.0.0", - "@vue/babel-preset-app": "^3.1.1", - "@vuepress/markdown": "^1.3.0", - "@vuepress/markdown-loader": "^1.3.0", - "@vuepress/plugin-last-updated": "^1.3.0", - "@vuepress/plugin-register-components": "^1.3.0", - "@vuepress/shared-utils": "^1.3.0", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/core/-/core-1.4.0.tgz", + "integrity": "sha512-xWiLG6MEzZdXGvr7/ickSr/plxPESC8c3prMOUDxROkFnyOiKmVvIyn4vAmRkFX3Xw4mfOLxucIOpQg0K6hEjw==", + "requires": { + "@babel/core": "^7.8.4", + "@vue/babel-preset-app": "^4.1.2", + "@vuepress/markdown": "^1.4.0", + "@vuepress/markdown-loader": "^1.4.0", + "@vuepress/plugin-last-updated": "^1.4.0", + "@vuepress/plugin-register-components": "^1.4.0", + "@vuepress/shared-utils": "^1.4.0", "autoprefixer": "^9.5.1", "babel-loader": "^8.0.4", "cache-loader": "^3.0.0", "chokidar": "^2.0.3", "connect-history-api-fallback": "^1.5.0", "copy-webpack-plugin": "^5.0.2", + "core-js": "^3.6.4", "cross-spawn": "^6.0.5", "css-loader": "^2.1.1", "file-loader": "^3.0.1", @@ -1104,14 +1257,21 @@ "webpack-dev-server": "^3.5.1", "webpack-merge": "^4.1.2", "webpackbar": "3.2.0" + }, + "dependencies": { + "core-js": { + 
"version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js/-/core-js-3.6.4.tgz", + "integrity": "sha512-4paDGScNgZP2IXXilaffL9X7968RuvwlkK3xWtZRVqgd8SYNiVKRJvkFd1aqqEuPfN7E68ZHEp9hDj6lHj4Hyw==" + } } }, "@vuepress/markdown": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.3.0.tgz", - "integrity": "sha512-h4FCAxcYLSGuoftbumsesqquRuQksb98sygiP/EV1J7z3qVj8r/1YdRRoUoE0Yd9hw0izN52KJRYZC7tlUmBnw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/markdown/-/markdown-1.4.0.tgz", + "integrity": "sha512-H3uojkiO5/uWKpwBEPdk5fsSj+ZGgNR7xi6oYhUxaUak9nC6mhMZ3KzeNA67QmevG3XHEoYx4d9oeAC1Au1frg==", "requires": { - "@vuepress/shared-utils": "^1.3.0", + "@vuepress/shared-utils": "^1.4.0", "markdown-it": "^8.4.1", "markdown-it-anchor": "^5.0.2", "markdown-it-chain": "^1.3.0", @@ -1140,56 +1300,61 @@ } }, "@vuepress/markdown-loader": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.3.0.tgz", - "integrity": "sha512-20J9+wuyCxhwOWfb7aDY0F/+j2oQYaoDE1VbH3zaqI9XesPl42DsEwA1Nw1asEm3yXdh+uC2scBCiNcv94tsHg==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/markdown-loader/-/markdown-loader-1.4.0.tgz", + "integrity": "sha512-oEHB6EzCeIxyQxg1HSGX3snRL25V6XZ3O0Zx/sWd5hl0sneEsRLHRMflPGhKu4c6cfsyTck7aTbt7Z71vVy0FQ==", "requires": { - "@vuepress/markdown": "^1.3.0", + "@vuepress/markdown": "^1.4.0", "loader-utils": "^1.1.0", "lru-cache": "^5.1.1" } }, "@vuepress/plugin-active-header-links": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.3.0.tgz", - "integrity": "sha512-C+EhZefAOxN83jVZebRWqFUBUklTsTtWRiDFczxcxqH995ZZumi1UFKj9TurOjrZppUDr4ftfxIqGkj4QSUeWw==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-active-header-links/-/plugin-active-header-links-1.4.0.tgz", + "integrity": 
"sha512-UWnRcqJZnX1LaPHxESx4XkRVJCleWvdGlSVivRGNLZuV1xrxJzB6LC86SNMur+imoyzeQL/oIgKY1QFx710g8w==", "requires": { "lodash.debounce": "^4.0.8" } }, + "@vuepress/plugin-google-analytics": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-google-analytics/-/plugin-google-analytics-1.4.1.tgz", + "integrity": "sha512-s43V5QHdTz0ayfy5vZrfMPpZzJBsj9L79TaxyMux1jOviS7oeWqkvNSblaHwP4Y8BxISehsKte8qsblQEN3zvQ==" + }, "@vuepress/plugin-last-updated": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.3.0.tgz", - "integrity": "sha512-zCg98YiCFzBo7hHh5CE4H7lO13QaexeNXKC8SC7aNopjhg1/+rzFKEWt5frARnYqhMrkhEqcegSuB4xWxNV+zQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-last-updated/-/plugin-last-updated-1.4.0.tgz", + "integrity": "sha512-sNxCXDz7AO4yIAZTEGt9TaLpJ2E0dgJGWx79nDFKfvpITn+Q2p7dUzkyVVxXs3TWXffoElGdNj/xIL5AUkg2qg==", "requires": { "cross-spawn": "^6.0.5" } }, "@vuepress/plugin-nprogress": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.3.0.tgz", - "integrity": "sha512-PuBDAhaYLvwG63LamIc1fMk+s4kUqPuvNYKfZjQlF3LtXjlCMvd6YEQyogfB9cZnFOg1nryeHJwWoAdFvzw29Q==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-nprogress/-/plugin-nprogress-1.4.0.tgz", + "integrity": "sha512-hJ9phJHONWWZqcWztbVtmmRjZduHQHIOBifUBvAfAGcuOBLVHqRnv3i7XD5UB3MIWPM1/bAoTA2TVs4sb9Wg4Q==", "requires": { "nprogress": "^0.2.0" } }, "@vuepress/plugin-register-components": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.3.0.tgz", - "integrity": "sha512-IkBacuTDHSHhI3qWXPQtVWTEAL+wprrbaYrD+g2n9xV3dzMkhHJxbpRpw7eAbvsP85a03rVouwRukZ+YlhYPPQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-register-components/-/plugin-register-components-1.4.0.tgz", + 
"integrity": "sha512-HmSzCTPVrlJJ8PSIXAvh4RkPy9bGmdrQuAXAtjiiq5rzBjL3uIg2VwzTrKDqf7FkCKs4lcRAEuNxB70bH6tddA==", "requires": { - "@vuepress/shared-utils": "^1.3.0" + "@vuepress/shared-utils": "^1.4.0" } }, "@vuepress/plugin-search": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.3.0.tgz", - "integrity": "sha512-buoQ6gQ2MLbLQ7Nhg5KJWPzKo7NtvdK/e6Fo1ig/kbOG5HyYKHCyqLjbQ/ZqT+fGbaSeEjH3DaVYTNx55GRX5A==" + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/plugin-search/-/plugin-search-1.4.0.tgz", + "integrity": "sha512-5K02DL9Wqlfy/aNiYXdbXBOGzR9zMNKz/P8lfHDU+ZOjtfNf6ImAdUkHS4pi70YkkTuemdYM8JjG/j5UYn6Rjw==" }, "@vuepress/shared-utils": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.3.0.tgz", - "integrity": "sha512-n1AFgt8SiMDdc5aIj5yOqS3E6+dAZ+9tPw6qf1mBiqvdZzwaUtlydvXqVkskrwUo18znLrUr55VYwubMOaxFnQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/shared-utils/-/shared-utils-1.4.0.tgz", + "integrity": "sha512-6QTv7zMRXAojCuPRIm4aosYfrQO4OREhyxvbFeg/ZMWkVX+xZZQTdE7ZyK/4NAvEgkpjtPTRC1TQYhLJUqC5mQ==", "requires": { "chalk": "^2.3.2", "diacritics": "^1.3.0", @@ -1203,13 +1368,13 @@ } }, "@vuepress/theme-default": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.3.0.tgz", - "integrity": "sha512-0KKTIQQAyO3xE9Gn5vdQYWY+B1onzMm2i3Td610FiLsCRqeHsWs/stl6tlP3nV75OUHwBRH/w0ITrIF4kMR7GQ==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/@vuepress/theme-default/-/theme-default-1.4.0.tgz", + "integrity": "sha512-4ywWVfXZTBha+yuvWoa1HRg0vMpT2wZF3zuW0PDXkDzxqP4DkLljJk8mPpepyuPYlSThn+gHNC8kmnNBbGp3Tw==", "requires": { - "@vuepress/plugin-active-header-links": "^1.3.0", - "@vuepress/plugin-nprogress": "^1.3.0", - "@vuepress/plugin-search": "^1.3.0", + "@vuepress/plugin-active-header-links": "^1.4.0", + "@vuepress/plugin-nprogress": "^1.4.0", + 
"@vuepress/plugin-search": "^1.4.0", "docsearch.js": "^2.5.2", "lodash": "^4.17.15", "stylus": "^0.54.5", @@ -1219,160 +1384,159 @@ } }, "@webassemblyjs/ast": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.8.5.tgz", - "integrity": "sha512-aJMfngIZ65+t71C3y2nBBg5FFG0Okt9m0XEgWZ7Ywgn1oMAT8cNwx00Uv1cQyHtidq0Xn94R4TAywO+LCQ+ZAQ==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ast/-/ast-1.9.0.tgz", + "integrity": "sha512-C6wW5L+b7ogSDVqymbkkvuW9kruN//YisMED04xzeBBqjHa2FYnmvOlS6Xj68xWQRgWvI9cIglsjFowH/RJyEA==", "requires": { - "@webassemblyjs/helper-module-context": "1.8.5", - "@webassemblyjs/helper-wasm-bytecode": "1.8.5", - "@webassemblyjs/wast-parser": "1.8.5" + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0" } }, "@webassemblyjs/floating-point-hex-parser": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.8.5.tgz", - "integrity": "sha512-9p+79WHru1oqBh9ewP9zW95E3XAo+90oth7S5Re3eQnECGq59ly1Ri5tsIipKGpiStHsUYmY3zMLqtk3gTcOtQ==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/floating-point-hex-parser/-/floating-point-hex-parser-1.9.0.tgz", + "integrity": "sha512-TG5qcFsS8QB4g4MhrxK5TqfdNe7Ey/7YL/xN+36rRjl/BlGE/NcBvJcqsRgCP6Z92mRE+7N50pRIi8SmKUbcQA==" }, "@webassemblyjs/helper-api-error": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.8.5.tgz", - "integrity": "sha512-Za/tnzsvnqdaSPOUXHyKJ2XI7PDX64kWtURyGiJJZKVEdFOsdKUCPTNEVFZq3zJ2R0G5wc2PZ5gvdTRFgm81zA==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-api-error/-/helper-api-error-1.9.0.tgz", + "integrity": "sha512-NcMLjoFMXpsASZFxJ5h2HZRcEhDkvnNFOAKneP5RbKRzaWJN36NC4jqQHKwStIhGXu5mUWlUUk7ygdtrO8lbmw==" }, 
"@webassemblyjs/helper-buffer": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.8.5.tgz", - "integrity": "sha512-Ri2R8nOS0U6G49Q86goFIPNgjyl6+oE1abW1pS84BuhP1Qcr5JqMwRFT3Ah3ADDDYGEgGs1iyb1DGX+kAi/c/Q==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-buffer/-/helper-buffer-1.9.0.tgz", + "integrity": "sha512-qZol43oqhq6yBPx7YM3m9Bv7WMV9Eevj6kMi6InKOuZxhw+q9hOkvq5e/PpKSiLfyetpaBnogSbNCfBwyB00CA==" }, "@webassemblyjs/helper-code-frame": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.8.5.tgz", - "integrity": "sha512-VQAadSubZIhNpH46IR3yWO4kZZjMxN1opDrzePLdVKAZ+DFjkGD/rf4v1jap744uPVU6yjL/smZbRIIJTOUnKQ==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-code-frame/-/helper-code-frame-1.9.0.tgz", + "integrity": "sha512-ERCYdJBkD9Vu4vtjUYe8LZruWuNIToYq/ME22igL+2vj2dQ2OOujIZr3MEFvfEaqKoVqpsFKAGsRdBSBjrIvZA==", "requires": { - "@webassemblyjs/wast-printer": "1.8.5" + "@webassemblyjs/wast-printer": "1.9.0" } }, "@webassemblyjs/helper-fsm": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.8.5.tgz", - "integrity": "sha512-kRuX/saORcg8se/ft6Q2UbRpZwP4y7YrWsLXPbbmtepKr22i8Z4O3V5QE9DbZK908dh5Xya4Un57SDIKwB9eow==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-fsm/-/helper-fsm-1.9.0.tgz", + "integrity": "sha512-OPRowhGbshCb5PxJ8LocpdX9Kl0uB4XsAjl6jH/dWKlk/mzsANvhwbiULsaiqT5GZGT9qinTICdj6PLuM5gslw==" }, "@webassemblyjs/helper-module-context": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.8.5.tgz", - "integrity": "sha512-/O1B236mN7UNEU4t9X7Pj38i4VoU8CcMHyy3l2cV/kIF4U5KoHXDVqcDuOs1ltkac90IM4vZdHc52t1x8Yfs3g==", + "version": "1.9.0", + "resolved": 
"https://registry.npmjs.org/@webassemblyjs/helper-module-context/-/helper-module-context-1.9.0.tgz", + "integrity": "sha512-MJCW8iGC08tMk2enck1aPW+BE5Cw8/7ph/VGZxwyvGbJwjktKkDK7vy7gAmMDx88D7mhDTCNKAW5tED+gZ0W8g==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "mamacro": "^0.0.3" + "@webassemblyjs/ast": "1.9.0" } }, "@webassemblyjs/helper-wasm-bytecode": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.8.5.tgz", - "integrity": "sha512-Cu4YMYG3Ddl72CbmpjU/wbP6SACcOPVbHN1dI4VJNJVgFwaKf1ppeFJrwydOG3NDHxVGuCfPlLZNyEdIYlQ6QQ==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-bytecode/-/helper-wasm-bytecode-1.9.0.tgz", + "integrity": "sha512-R7FStIzyNcd7xKxCZH5lE0Bqy+hGTwS3LJjuv1ZVxd9O7eHCedSdrId/hMOd20I+v8wDXEn+bjfKDLzTepoaUw==" }, "@webassemblyjs/helper-wasm-section": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.8.5.tgz", - "integrity": "sha512-VV083zwR+VTrIWWtgIUpqfvVdK4ff38loRmrdDBgBT8ADXYsEZ5mPQ4Nde90N3UYatHdYoDIFb7oHzMncI02tA==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/helper-wasm-section/-/helper-wasm-section-1.9.0.tgz", + "integrity": "sha512-XnMB8l3ek4tvrKUUku+IVaXNHz2YsJyOOmz+MMkZvh8h1uSJpSen6vYnw3IoQ7WwEuAhL8Efjms1ZWjqh2agvw==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-buffer": "1.8.5", - "@webassemblyjs/helper-wasm-bytecode": "1.8.5", - "@webassemblyjs/wasm-gen": "1.8.5" + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0" } }, "@webassemblyjs/ieee754": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.8.5.tgz", - "integrity": "sha512-aaCvQYrvKbY/n6wKHb/ylAJr27GglahUO89CcGXMItrOBqRarUMxWLJgxm9PJNuKULwN5n1csT9bYoMeZOGF3g==", + 
"version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/ieee754/-/ieee754-1.9.0.tgz", + "integrity": "sha512-dcX8JuYU/gvymzIHc9DgxTzUUTLexWwt8uCTWP3otys596io0L5aW02Gb1RjYpx2+0Jus1h4ZFqjla7umFniTg==", "requires": { "@xtuc/ieee754": "^1.2.0" } }, "@webassemblyjs/leb128": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.8.5.tgz", - "integrity": "sha512-plYUuUwleLIziknvlP8VpTgO4kqNaH57Y3JnNa6DLpu/sGcP6hbVdfdX5aHAV716pQBKrfuU26BJK29qY37J7A==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/leb128/-/leb128-1.9.0.tgz", + "integrity": "sha512-ENVzM5VwV1ojs9jam6vPys97B/S65YQtv/aanqnU7D8aSoHFX8GyhGg0CMfyKNIHBuAVjy3tlzd5QMMINa7wpw==", "requires": { "@xtuc/long": "4.2.2" } }, "@webassemblyjs/utf8": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.8.5.tgz", - "integrity": "sha512-U7zgftmQriw37tfD934UNInokz6yTmn29inT2cAetAsaU9YeVCveWEwhKL1Mg4yS7q//NGdzy79nlXh3bT8Kjw==" + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/utf8/-/utf8-1.9.0.tgz", + "integrity": "sha512-GZbQlWtopBTP0u7cHrEx+73yZKrQoBMpwkGEIqlacljhXCkVM1kMQge/Mf+csMJAjEdSwhOyLAS0AoR3AG5P8w==" }, "@webassemblyjs/wasm-edit": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.8.5.tgz", - "integrity": "sha512-A41EMy8MWw5yvqj7MQzkDjU29K7UJq1VrX2vWLzfpRHt3ISftOXqrtojn7nlPsZ9Ijhp5NwuODuycSvfAO/26Q==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-edit/-/wasm-edit-1.9.0.tgz", + "integrity": "sha512-FgHzBm80uwz5M8WKnMTn6j/sVbqilPdQXTWraSjBwFXSYGirpkSWE2R9Qvz9tNiTKQvoKILpCuTjBKzOIm0nxw==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-buffer": "1.8.5", - "@webassemblyjs/helper-wasm-bytecode": "1.8.5", - "@webassemblyjs/helper-wasm-section": "1.8.5", - "@webassemblyjs/wasm-gen": "1.8.5", - "@webassemblyjs/wasm-opt": "1.8.5", - 
"@webassemblyjs/wasm-parser": "1.8.5", - "@webassemblyjs/wast-printer": "1.8.5" + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/helper-wasm-section": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0", + "@webassemblyjs/wasm-opt": "1.9.0", + "@webassemblyjs/wasm-parser": "1.9.0", + "@webassemblyjs/wast-printer": "1.9.0" } }, "@webassemblyjs/wasm-gen": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.8.5.tgz", - "integrity": "sha512-BCZBT0LURC0CXDzj5FXSc2FPTsxwp3nWcqXQdOZE4U7h7i8FqtFK5Egia6f9raQLpEKT1VL7zr4r3+QX6zArWg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-gen/-/wasm-gen-1.9.0.tgz", + "integrity": "sha512-cPE3o44YzOOHvlsb4+E9qSqjc9Qf9Na1OO/BHFy4OI91XDE14MjFN4lTMezzaIWdPqHnsTodGGNP+iRSYfGkjA==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-wasm-bytecode": "1.8.5", - "@webassemblyjs/ieee754": "1.8.5", - "@webassemblyjs/leb128": "1.8.5", - "@webassemblyjs/utf8": "1.8.5" + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/ieee754": "1.9.0", + "@webassemblyjs/leb128": "1.9.0", + "@webassemblyjs/utf8": "1.9.0" } }, "@webassemblyjs/wasm-opt": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.8.5.tgz", - "integrity": "sha512-HKo2mO/Uh9A6ojzu7cjslGaHaUU14LdLbGEKqTR7PBKwT6LdPtLLh9fPY33rmr5wcOMrsWDbbdCHq4hQUdd37Q==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-opt/-/wasm-opt-1.9.0.tgz", + "integrity": "sha512-Qkjgm6Anhm+OMbIL0iokO7meajkzQD71ioelnfPEj6r4eOFuqm4YC3VBPqXjFyyNwowzbMD+hizmprP/Fwkl2A==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-buffer": "1.8.5", - "@webassemblyjs/wasm-gen": "1.8.5", - "@webassemblyjs/wasm-parser": "1.8.5" + "@webassemblyjs/ast": "1.9.0", + 
"@webassemblyjs/helper-buffer": "1.9.0", + "@webassemblyjs/wasm-gen": "1.9.0", + "@webassemblyjs/wasm-parser": "1.9.0" } }, "@webassemblyjs/wasm-parser": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.8.5.tgz", - "integrity": "sha512-pi0SYE9T6tfcMkthwcgCpL0cM9nRYr6/6fjgDtL6q/ZqKHdMWvxitRi5JcZ7RI4SNJJYnYNaWy5UUrHQy998lw==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wasm-parser/-/wasm-parser-1.9.0.tgz", + "integrity": "sha512-9+wkMowR2AmdSWQzsPEjFU7njh8HTO5MqO8vjwEHuM+AMHioNqSBONRdr0NQQ3dVQrzp0s8lTcYqzUdb7YgELA==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-api-error": "1.8.5", - "@webassemblyjs/helper-wasm-bytecode": "1.8.5", - "@webassemblyjs/ieee754": "1.8.5", - "@webassemblyjs/leb128": "1.8.5", - "@webassemblyjs/utf8": "1.8.5" + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-api-error": "1.9.0", + "@webassemblyjs/helper-wasm-bytecode": "1.9.0", + "@webassemblyjs/ieee754": "1.9.0", + "@webassemblyjs/leb128": "1.9.0", + "@webassemblyjs/utf8": "1.9.0" } }, "@webassemblyjs/wast-parser": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.8.5.tgz", - "integrity": "sha512-daXC1FyKWHF1i11obK086QRlsMsY4+tIOKgBqI1lxAnkp9xe9YMcgOxm9kLe+ttjs5aWV2KKE1TWJCN57/Btsg==", - "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/floating-point-hex-parser": "1.8.5", - "@webassemblyjs/helper-api-error": "1.8.5", - "@webassemblyjs/helper-code-frame": "1.8.5", - "@webassemblyjs/helper-fsm": "1.8.5", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-parser/-/wast-parser-1.9.0.tgz", + "integrity": "sha512-qsqSAP3QQ3LyZjNC/0jBJ/ToSxfYJ8kYyuiGvtn/8MK89VrNEfwj7BPQzJVHi0jGTRK2dGdJ5PRqhtjzoww+bw==", + "requires": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/floating-point-hex-parser": "1.9.0", + "@webassemblyjs/helper-api-error": "1.9.0", + 
"@webassemblyjs/helper-code-frame": "1.9.0", + "@webassemblyjs/helper-fsm": "1.9.0", "@xtuc/long": "4.2.2" } }, "@webassemblyjs/wast-printer": { - "version": "1.8.5", - "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.8.5.tgz", - "integrity": "sha512-w0U0pD4EhlnvRyeJzBqaVSJAo9w/ce7/WPogeXLzGkO6hzhr4GnQIZ4W4uUt5b9ooAaXPtnXlj0gzsXEOUNYMg==", + "version": "1.9.0", + "resolved": "https://registry.npmjs.org/@webassemblyjs/wast-printer/-/wast-printer-1.9.0.tgz", + "integrity": "sha512-2J0nE95rHXHyQ24cWjMKJ1tqB/ds8z/cyeOZxJhcb+rW+SQASVjuznUSmdz5GpVJTzU8JkhYut0D3siFDD6wsA==", "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/wast-parser": "1.8.5", + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/wast-parser": "1.9.0", "@xtuc/long": "4.2.2" } }, @@ -1426,9 +1590,9 @@ "integrity": "sha1-xdG9SxKQCPEWPyNvhuX66iAm4u8=" }, "ajv": { - "version": "6.11.0", - "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.11.0.tgz", - "integrity": "sha512-nCprB/0syFYy9fVYU1ox1l2KN8S9I+tziH8D4zdZuLT3N6RMlGSGt5FSTpAiHB/Whv8Qs1cWHma1aMKZyaHRKA==", + "version": "6.12.0", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.0.tgz", + "integrity": "sha512-D6gFiFA0RRLyUbvijN74DWAjXSFxWKaWP7mldxkVhyhAV3+SWA9HEJPHQ2c9soIeTFJqcSdFDGFgdqs1iUU2Hw==", "requires": { "fast-deep-equal": "^3.1.1", "fast-json-stable-stringify": "^2.0.0", @@ -1557,11 +1721,11 @@ "integrity": "sha512-hHUXGagefjN2iRrID63xckIvotOXOojhQKWIPUZ4mNUZ9nLZW+7FMNoE1lOkEhNWYsx/7ysGIuJYCiMAA9FnrA==" }, "ansi-escapes": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.0.tgz", - "integrity": "sha512-EiYhwo0v255HUL6eDyuLrXEkTi7WwVCLAw+SeOQ7M7qdun1z1pum4DEm/nuqIVbPvi9RPPc9k9LbyBv6H0DwVg==", + "version": "4.3.1", + "resolved": "https://registry.npmjs.org/ansi-escapes/-/ansi-escapes-4.3.1.tgz", + "integrity": "sha512-JWF7ocqNrp8u9oqpgV+wH5ftbt+cfvv+PTjOvKLT3AdYly/LmORARfEVT1iyjwN+4MqE5UmVKoAdIBqeoCHgLA==", "requires": { - "type-fest": "^0.8.1" 
+ "type-fest": "^0.11.0" } }, "ansi-html": { @@ -1746,17 +1910,17 @@ } }, "autoprefixer": { - "version": "9.7.4", - "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.7.4.tgz", - "integrity": "sha512-g0Ya30YrMBAEZk60lp+qfX5YQllG+S5W3GYCFvyHTvhOki0AEQJLPEcIuGRsqVwLi8FvXPVtwTGhfr38hVpm0g==", + "version": "9.7.6", + "resolved": "https://registry.npmjs.org/autoprefixer/-/autoprefixer-9.7.6.tgz", + "integrity": "sha512-F7cYpbN7uVVhACZTeeIeealwdGM6wMtfWARVLTy5xmKtgVdBNJvbDRoCK3YO1orcs7gv/KwYlb3iXwu9Ug9BkQ==", "requires": { - "browserslist": "^4.8.3", - "caniuse-lite": "^1.0.30001020", + "browserslist": "^4.11.1", + "caniuse-lite": "^1.0.30001039", "chalk": "^2.4.2", "normalize-range": "^0.1.2", "num2fraction": "^1.2.2", - "postcss": "^7.0.26", - "postcss-value-parser": "^4.0.2" + "postcss": "^7.0.27", + "postcss-value-parser": "^4.0.3" } }, "aws-sign2": { @@ -1778,14 +1942,15 @@ } }, "babel-loader": { - "version": "8.0.6", - "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.0.6.tgz", - "integrity": "sha512-4BmWKtBOBm13uoUwd08UwjZlaw3O9GWf456R9j+5YykFZ6LUIjIKLc0zEZf+hauxPOJs96C8k6FvYD09vWzhYw==", + "version": "8.1.0", + "resolved": "https://registry.npmjs.org/babel-loader/-/babel-loader-8.1.0.tgz", + "integrity": "sha512-7q7nC1tYOrqvUrN3LQK4GwSk/TQorZSOlO9C+RZDZpODgyN4ZlCqE5q9cDsyWOliN+aU9B4JX01xK9eJXowJLw==", "requires": { - "find-cache-dir": "^2.0.0", - "loader-utils": "^1.0.2", - "mkdirp": "^0.5.1", - "pify": "^4.0.1" + "find-cache-dir": "^2.1.0", + "loader-utils": "^1.4.0", + "mkdirp": "^0.5.3", + "pify": "^4.0.1", + "schema-utils": "^2.6.5" } }, "babel-plugin-dynamic-import-node": { @@ -1796,18 +1961,6 @@ "object.assign": "^4.1.0" } }, - "babel-plugin-module-resolver": { - "version": "3.2.0", - "resolved": "https://registry.npmjs.org/babel-plugin-module-resolver/-/babel-plugin-module-resolver-3.2.0.tgz", - "integrity": "sha512-tjR0GvSndzPew/Iayf4uICWZqjBwnlMWjSx6brryfQ81F9rxBVqwDJtFCV8oOs0+vJeefK9TmdZtkIFdFe1UnA==", - 
"requires": { - "find-babel-config": "^1.1.0", - "glob": "^7.1.2", - "pkg-up": "^2.0.0", - "reselect": "^3.0.1", - "resolve": "^1.4.0" - } - }, "babel-runtime": { "version": "6.26.0", "resolved": "https://registry.npmjs.org/babel-runtime/-/babel-runtime-6.26.0.tgz", @@ -2081,6 +2234,11 @@ "requires": { "has-flag": "^4.0.0" } + }, + "type-fest": { + "version": "0.8.1", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", + "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" } } }, @@ -2181,13 +2339,14 @@ } }, "browserslist": { - "version": "4.8.6", - "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.8.6.tgz", - "integrity": "sha512-ZHao85gf0eZ0ESxLfCp73GG9O/VTytYDIkIiZDlURppLTI9wErSM/5yAKEq6rcUdxBLjMELmrYUJGg5sxGKMHg==", + "version": "4.11.1", + "resolved": "https://registry.npmjs.org/browserslist/-/browserslist-4.11.1.tgz", + "integrity": "sha512-DCTr3kDrKEYNw6Jb9HFxVLQNaue8z+0ZfRBRjmCunKDEXEBajKDj2Y+Uelg+Pi29OnvaSGwjOsnRyNEkXzHg5g==", "requires": { - "caniuse-lite": "^1.0.30001023", - "electron-to-chromium": "^1.3.341", - "node-releases": "^1.1.47" + "caniuse-lite": "^1.0.30001038", + "electron-to-chromium": "^1.3.390", + "node-releases": "^1.1.53", + "pkg-up": "^2.0.0" } }, "buffer": { @@ -2231,14 +2390,14 @@ "integrity": "sha1-0ygVQE1olpn4Wk6k+odV3ROpYEg=" }, "cac": { - "version": "6.5.6", - "resolved": "https://registry.npmjs.org/cac/-/cac-6.5.6.tgz", - "integrity": "sha512-8jsGLeBiYEVYTDExaj/rDPG4tyra4yjjacIL10TQ+MobPcg9/IST+dkKLu6sOzq0GcIC6fQqX1nkH9HoskQLAw==" + "version": "6.5.8", + "resolved": "https://registry.npmjs.org/cac/-/cac-6.5.8.tgz", + "integrity": "sha512-jLv2+ps4T2HRVR1k4UlQZoAFvliAhf5LVR0yjPjIaIr/Cw99p/I7CXIEkXtw5q+AkYk4NCFJcF5ErmELSyrZnw==" }, "cacache": { - "version": "12.0.3", - "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.3.tgz", - "integrity": 
"sha512-kqdmfXEGFepesTuROHMs3MpFLWrPkSSpRqOw80RCflZXy/khxaArvFrQ7uJxSUduzAufc6G0g1VUCOZXxWavPw==", + "version": "12.0.4", + "resolved": "https://registry.npmjs.org/cacache/-/cacache-12.0.4.tgz", + "integrity": "sha512-a0tMB40oefvuInr4Cwb3GerbL9xTj1D5yg0T5xrjGCGyfvbxseIXX7BAO/u/hIXdafzOI5JC3wDwHyf24buOAQ==", "requires": { "bluebird": "^3.5.5", "chownr": "^1.1.1", @@ -2284,6 +2443,18 @@ "mkdirp": "^0.5.1", "neo-async": "^2.6.1", "schema-utils": "^1.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "cacheable-request": { @@ -2372,9 +2543,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001027", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001027.tgz", - "integrity": "sha512-7xvKeErvXZFtUItTHgNtLgS9RJpVnwBlWX8jSo/BO8VsF6deszemZSkJJJA1KOKrXuzZH4WALpAJdq5EyfgMLg==" + "version": "1.0.30001039", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001039.tgz", + "integrity": "sha512-SezbWCTT34eyFoWHgx8UWso7YtvtM7oosmFoXbCkdC6qJzRfBTeTgE9REtKtiuKXuMwWTZEvdnFNGAyVMorv8Q==" }, "caseless": { "version": "0.12.0", @@ -2448,9 +2619,9 @@ } }, "chownr": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.3.tgz", - "integrity": "sha512-i70fVHhmV3DtTl6nqvZOnIjbY0Pe4kAUjwHj8z0zAdgBtYrJyYwLKCCuRBQ5ppkyL0AkN7HKRnETdmdp1zqNXw==" + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", + "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==" }, "chrome-trace-event": { "version": "1.0.2", @@ -2509,9 +2680,9 @@ "integrity": 
"sha512-gpaBrMAizVEANOpfZp/EEUixTXDyGt7DFzdK5hU+UbWt/J0lB0w20ncZj59Z9a93xHb9u12zF5BS6i9RKbtg4w==" }, "clipboard": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.4.tgz", - "integrity": "sha512-Vw26VSLRpJfBofiVaFb/I8PVfdI1OxKcYShe6fm0sP/DtmiWQNCjhM/okTvdCo0G+lMMm1rMYbk4IK4x1X+kgQ==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/clipboard/-/clipboard-2.0.6.tgz", + "integrity": "sha512-g5zbiixBRk/wyKakSwCKd7vQXDjFnAMGHoEyBogG/bw9kTD9GvdAvaoRR1ALcEzt3pVKxZR0pViekPMIS0QyGg==", "optional": true, "requires": { "good-listener": "^1.2.2", @@ -2703,11 +2874,11 @@ } }, "configstore": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.0.tgz", - "integrity": "sha512-eE/hvMs7qw7DlcB5JPRnthmrITuHMmACUJAp89v6PT6iOqzoLS7HRWhBtuHMlhNHo2AhUSA/3Dh1bKNJHcublQ==", + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/configstore/-/configstore-5.0.1.tgz", + "integrity": "sha512-aMKprgk5YhBNyH25hj8wGt2+D52Sw1DRRIzqBwLp2Ya9mFmY8KPvvtvmna8SxVR9JMZ4kzMD68N22vlaRpkeFA==", "requires": { - "dot-prop": "^5.1.0", + "dot-prop": "^5.2.0", "graceful-fs": "^4.1.2", "make-dir": "^3.0.0", "unique-string": "^2.0.0", @@ -2715,23 +2886,10 @@ "xdg-basedir": "^4.0.0" }, "dependencies": { - "dot-prop": { - "version": "5.2.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.2.0.tgz", - "integrity": "sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A==", - "requires": { - "is-obj": "^2.0.0" - } - }, - "is-obj": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", - "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" - }, "make-dir": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.0.0.tgz", - "integrity": 
"sha512-grNJDhb8b1Jm1qeqW5R/O63wUo4UXo2v2HMic6YT9i/HBlF93S8jkMgH7yugvY9ABDShH4VZMn8I+U8+fCNegw==", + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/make-dir/-/make-dir-3.0.2.tgz", + "integrity": "sha512-rYKABKutXa6vXTXhoV18cBE7PaewPXHe/Bdq4v+ZLMhxbWApkFFplT0LcbMW+6BbjnQXzZ/sAvSE/JdguApG5w==", "requires": { "semver": "^6.0.0" } @@ -2878,9 +3036,9 @@ "integrity": "sha512-Pgs951kaMm5GXP7MOvxERINe3gsaVjUWFm+UZPSq9xYriQAksyhg0csnS0KXSNRD5NmNdapXEpjxG49+AKh/ug==" }, "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "requires": { "p-try": "^2.0.0" } @@ -2895,6 +3053,16 @@ "resolved": "https://registry.npmjs.org/pify/-/pify-3.0.0.tgz", "integrity": "sha1-5aSs0sEB/fPZpNB/DbxNtJ3SgXY=" }, + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + }, "slash": { "version": "1.0.0", "resolved": "https://registry.npmjs.org/slash/-/slash-1.0.0.tgz", @@ -2907,6 +3075,22 @@ "resolved": "https://registry.npmjs.org/core-js/-/core-js-2.6.11.tgz", "integrity": "sha512-5wjnpaT/3dV+XB4borEsnAYQchn00XSgTAWKDkEqv+K8KevjbzmofK6hfJ9TZIlpj2N0xQpazy7PiRQiWHqzWg==" }, + "core-js-compat": { + "version": "3.6.4", + "resolved": "https://registry.npmjs.org/core-js-compat/-/core-js-compat-3.6.4.tgz", + "integrity": "sha512-zAa3IZPvsJ0slViBQ2z+vgyyTuhd3MFn1rBQjZSKVEgB0UMYhUkCj9jJUVPgGTGqWvsBVmfnruXgTcNyTlEiSA==", + "requires": { + "browserslist": 
"^4.8.3", + "semver": "7.0.0" + }, + "dependencies": { + "semver": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.0.0.tgz", + "integrity": "sha512-+GB6zVA9LWh6zovYQLALHwv5rb2PHGlJi3lfiqIHxR0uuwCgefcOJc59v9fv1w8GbStwxuuqqAjI9NMAOOgq1A==" + } + } + }, "core-util-is": { "version": "1.0.2", "resolved": "https://registry.npmjs.org/core-util-is/-/core-util-is-1.0.2.tgz", @@ -3051,6 +3235,16 @@ "version": "3.3.1", "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" + }, + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } } } }, @@ -3087,11 +3281,6 @@ "source-map": "^0.6.1" } }, - "css-unit-converter": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/css-unit-converter/-/css-unit-converter-1.1.1.tgz", - "integrity": "sha1-2bkoGtz9jO2TW9urqDeGiX9k6ZY=" - }, "css-what": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/css-what/-/css-what-2.1.3.tgz", @@ -3174,11 +3363,27 @@ "integrity": "sha512-WcKx5OY+KoSIAxBW6UBBRay1U6vkYheCdjyVNDm85zt5K9mHoGOfsOsqIszfAqrQQFIIKgjh2+FDgIj/zsl21Q==" }, "csso": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/csso/-/csso-4.0.2.tgz", - "integrity": "sha512-kS7/oeNVXkHWxby5tHVxlhjizRCSv8QdU7hB2FpdAibDU8FjTAolhNjKNTiLzXtUrKT6HwClE81yXwEk1309wg==", + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/csso/-/csso-4.0.3.tgz", + "integrity": "sha512-NL3spysxUkcrOgnpsT4Xdl2aiEiBG6bXswAABQVHcMrfjjBisFOKwLDOmf4wf32aPdcJws1zds2B0Rg+jqMyHQ==", "requires": { - "css-tree": "1.0.0-alpha.37" + "css-tree": "1.0.0-alpha.39" + }, + 
"dependencies": { + "css-tree": { + "version": "1.0.0-alpha.39", + "resolved": "https://registry.npmjs.org/css-tree/-/css-tree-1.0.0-alpha.39.tgz", + "integrity": "sha512-7UvkEYgBAHRG9Nt980lYxjsTrCyHFN53ky3wVsDkiMdVqylqRt+Zc+jm5qw7/qyOvN2dHSYtX0e4MbCCExSvnA==", + "requires": { + "mdn-data": "2.0.6", + "source-map": "^0.6.1" + } + }, + "mdn-data": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/mdn-data/-/mdn-data-2.0.6.tgz", + "integrity": "sha512-rQvjv71olwNHgiTbfPZFkJtjNMciWgswYeciZhtvWLO8bmX3TnhyA62I6sTWOyZssWHJJjY6/KiWwqQsWWsqOA==" + } } }, "cyclist": { @@ -3466,9 +3671,9 @@ } }, "dom-walk": { - "version": "0.1.1", - "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.1.tgz", - "integrity": "sha1-ZyIm3HTI95mtNTB9+TaroRrNYBg=" + "version": "0.1.2", + "resolved": "https://registry.npmjs.org/dom-walk/-/dom-walk-0.1.2.tgz", + "integrity": "sha512-6QvTW9mrGeIegrFXdtQi9pk7O/nSK6lSdXW2eqUspN5LWD7UTji2Fqw5V2YLjBpHEoU9Xl/eUWNpDeZvoyOv2w==" }, "domain-browser": { "version": "1.2.0", @@ -3498,11 +3703,11 @@ } }, "dot-prop": { - "version": "4.2.0", - "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-4.2.0.tgz", - "integrity": "sha512-tUMXrxlExSW6U2EXiiKGSBVdYgtV8qlHL+C10TsW4PURY/ic+eaysnSkwB4kA/mBlCyy/IKDJ+Lc3wbWeaXtuQ==", + "version": "5.2.0", + "resolved": "https://registry.npmjs.org/dot-prop/-/dot-prop-5.2.0.tgz", + "integrity": "sha512-uEUyaDKoSQ1M4Oq8l45hSE26SnTxL6snNnqvK/VWx5wJhmff5z0FUVJDKDanor/6w3kzE3i7XZOk+7wC0EXr1A==", "requires": { - "is-obj": "^1.0.0" + "is-obj": "^2.0.0" } }, "duplexer3": { @@ -3565,9 +3770,9 @@ "integrity": "sha1-WQxhFWsK4vTwJVcyoViyZrxWsh0=" }, "electron-to-chromium": { - "version": "1.3.346", - "resolved": "https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.346.tgz", - "integrity": "sha512-Yy4jF5hJd57BWmGPt0KjaXc25AmWZeQK75kdr4zIzksWVtiT6DwaNtvTb9dt+LkQKwUpvBfCyyPsXXtbY/5GYw==" + "version": "1.3.398", + "resolved": 
"https://registry.npmjs.org/electron-to-chromium/-/electron-to-chromium-1.3.398.tgz", + "integrity": "sha512-BJjxuWLKFbM5axH3vES7HKMQgAknq9PZHBkMK/rEXUQG9i1Iw5R+6hGkm6GtsQSANjSUrh/a6m32nzCNDNo/+w==" }, "elliptic": { "version": "6.5.2", @@ -3589,9 +3794,9 @@ "integrity": "sha512-CwBLREIQ7LvYFB0WyRvwhq5N5qPhc6PMjD6bYggFlI5YyDgl+0vxq5VHbMOFqLg7hfWzmu8T5Z1QofhmTIhItA==" }, "emojis-list": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", - "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=" + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-3.0.0.tgz", + "integrity": "sha512-/kyM18EfinwXZbno9FyUGeFh87KC8HRQBQGildHZbEuRyWFOmv1U10o9BBp8XVZDVNNuQKyIGIu5ZYAAXJ0V2Q==" }, "encodeurl": { "version": "1.0.2", @@ -3690,9 +3895,9 @@ } }, "es-abstract": { - "version": "1.17.4", - "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.4.tgz", - "integrity": "sha512-Ae3um/gb8F0mui/jPL+QiqmglkUsaQf7FwBEHYIFkztkneosu9imhqHpBzQ3h1vit8t5iQ74t6PEVvphBZiuiQ==", + "version": "1.17.5", + "resolved": "https://registry.npmjs.org/es-abstract/-/es-abstract-1.17.5.tgz", + "integrity": "sha512-BR9auzDbySxOcfog0tLECW8l28eRGpDpU3Dm3Hp4q/N+VtLTmyj4EUN088XZWQDW/hzj6sYRDXeOFsaAODKvpg==", "requires": { "es-to-primitive": "^1.2.1", "function-bind": "^1.1.1", @@ -3722,6 +3927,11 @@ "resolved": "https://registry.npmjs.org/es6-promise/-/es6-promise-4.2.8.tgz", "integrity": "sha512-HJDGx5daxeIvxdBxvG2cb9g4tEvwIk3i8+nhX0yGrYmZUzbkdg8QbDevheDB8gd0//uPj4c1EQua8Q+MViT0/w==" }, + "escape-goat": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/escape-goat/-/escape-goat-2.1.1.tgz", + "integrity": "sha512-8/uIhbG12Csjy2JEW7D9pHbreaVaS/OpN3ycnyvElTdwM5n6GY6W6e2IPemfvGZeUMqZ9A/3GqIZMgKnBhAw/Q==" + }, "escape-html": { "version": "1.0.3", "resolved": "https://registry.npmjs.org/escape-html/-/escape-html-1.0.3.tgz", @@ -4000,14 +4210,14 @@ } }, "figgy-pudding": { - "version": "3.5.1", - "resolved": 
"https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.1.tgz", - "integrity": "sha512-vNKxJHTEKNThjfrdJwHc7brvM6eVevuO5nTj6ez8ZQ1qbXTvGthucRF7S4vf2cr71QVnT70V34v0S1DyQsti0w==" + "version": "3.5.2", + "resolved": "https://registry.npmjs.org/figgy-pudding/-/figgy-pudding-3.5.2.tgz", + "integrity": "sha512-0btnI/H8f2pavGMN8w40mlSKOfTK2SVJmBfBeVIj3kNw0swwgzyRq0d5TJVOwodFmtvpPeWPN/MCcfuWF0Ezbw==" }, "figures": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/figures/-/figures-3.1.0.tgz", - "integrity": "sha512-ravh8VRXqHuMvZt/d8GblBeqDMkdJMBdv/2KntFH+ra5MXkO7nxNKpzQ3n6QD/2da1kH0aWmNISdvhM7gl2gVg==", + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/figures/-/figures-3.2.0.tgz", + "integrity": "sha512-yaduQFRKLXYOGgEn6AZau90j3ggSOyiqXU0F9JZfeXYhNa+Jk4X+s45A2zg5jns87GAFa34BBm2kXw4XpNcbdg==", "requires": { "escape-string-regexp": "^1.0.5" } @@ -4019,6 +4229,18 @@ "requires": { "loader-utils": "^1.0.2", "schema-utils": "^1.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "file-uri-to-path": { @@ -4062,22 +4284,6 @@ } } }, - "find-babel-config": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/find-babel-config/-/find-babel-config-1.2.0.tgz", - "integrity": "sha512-jB2CHJeqy6a820ssiqwrKMeyC6nNdmrcgkKWJWmpoxpE8RKciYJXCcXRq1h2AzCo5I5BJeN2tkGEO3hLTuePRA==", - "requires": { - "json5": "^0.5.1", - "path-exists": "^3.0.0" - }, - "dependencies": { - "json5": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", - "integrity": "sha1-Hq3nrMASA0rYTiOWdn6tn6VJWCE=" - } - } - }, "find-cache-dir": { "version": "2.1.0", "resolved": 
"https://registry.npmjs.org/find-cache-dir/-/find-cache-dir-2.1.0.tgz", @@ -4279,9 +4485,9 @@ "integrity": "sha1-FQStJSMVjKpA20onh8sBQRmU6k8=" }, "fsevents": { - "version": "1.2.11", - "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.11.tgz", - "integrity": "sha512-+ux3lx6peh0BpvY0JebGyZoiR4D+oYzdPZMKJwkZ+sFkNJzpL7tXc/wehS49gUAxg3tmMHPHZkA8JU2rhhgDHw==", + "version": "1.2.12", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-1.2.12.tgz", + "integrity": "sha512-Ggd/Ktt7E7I8pxZRbGIs7vwqAPscSESMrCSkx2FtWeqmheJgCo2R74fTsZFCifr0VTPwqRpPv17+6b8Zp7th0Q==", "optional": true, "requires": { "bindings": "^1.5.0", @@ -4328,7 +4534,7 @@ } }, "chownr": { - "version": "1.1.3", + "version": "1.1.4", "bundled": true, "optional": true }, @@ -4478,7 +4684,7 @@ } }, "minimist": { - "version": "0.0.8", + "version": "1.2.5", "bundled": true, "optional": true }, @@ -4500,11 +4706,11 @@ } }, "mkdirp": { - "version": "0.5.1", + "version": "0.5.3", "bundled": true, "optional": true, "requires": { - "minimist": "0.0.8" + "minimist": "^1.2.5" } }, "ms": { @@ -4513,7 +4719,7 @@ "optional": true }, "needle": { - "version": "2.4.0", + "version": "2.3.3", "bundled": true, "optional": true, "requires": { @@ -4540,7 +4746,7 @@ } }, "nopt": { - "version": "4.0.1", + "version": "4.0.3", "bundled": true, "optional": true, "requires": { @@ -4562,12 +4768,13 @@ "optional": true }, "npm-packlist": { - "version": "1.4.7", + "version": "1.4.8", "bundled": true, "optional": true, "requires": { "ignore-walk": "^3.0.1", - "npm-bundled": "^1.0.1" + "npm-bundled": "^1.0.1", + "npm-normalize-package-bin": "^1.0.1" } }, "npmlog": { @@ -4637,17 +4844,10 @@ "ini": "~1.3.0", "minimist": "^1.2.0", "strip-json-comments": "~2.0.1" - }, - "dependencies": { - "minimist": { - "version": "1.2.0", - "bundled": true, - "optional": true - } } }, "readable-stream": { - "version": "2.3.6", + "version": "2.3.7", "bundled": true, "optional": true, "requires": { @@ -4774,9 +4974,9 @@ "integrity": 
"sha512-yIovAzMX49sF8Yl58fSCWJ5svSLuaibPxXQJFLmBObTuCr0Mf1KiPopGM9NiFjiYBCbfaa2Fh6breQ6ANVTI0A==" }, "fuse.js": { - "version": "3.4.6", - "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.4.6.tgz", - "integrity": "sha512-H6aJY4UpLFwxj1+5nAvufom5b2BT2v45P1MkPvdGIK8fWjQx/7o6tTT1+ALV0yawQvbmvCF0ufl2et8eJ7v7Cg==" + "version": "3.6.1", + "resolved": "https://registry.npmjs.org/fuse.js/-/fuse.js-3.6.1.tgz", + "integrity": "sha512-hT9yh/tiinkmirKrlv4KWOjztdoZo1mx9Qh4KvWqC7isoXwdUY3PNWUxceF4/qO9R6riA2C29jdTOeQOIROjgw==" }, "gensync": { "version": "1.0.0-beta.1", @@ -4927,9 +5127,9 @@ } }, "handle-thing": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.0.tgz", - "integrity": "sha512-d4sze1JNC454Wdo2fkuyzCr6aHcbL6PGGuFAz0Li/NcOm1tCHGnWDRmJP85dh9IhQErTc2svWFEX5xHIOo//kQ==" + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/handle-thing/-/handle-thing-2.0.1.tgz", + "integrity": "sha512-9Qn4yBxelxoh2Ow62nP+Ka/kMnOXRi8BXnRaUwezLNhqelnN49xKz4F/dPP8OYLxLxq6JDtZb2i9XznUQbNPTg==" }, "har-schema": { "version": "2.0.0", @@ -5065,9 +5265,9 @@ } }, "hotkeys-js": { - "version": "3.7.3", - "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.7.3.tgz", - "integrity": "sha512-CSaeVPAKEEYNexYR35znMJnCqoofk7oqG/AOOqWow1qDT0Yxy+g+Y8Hs/LhGlsZaSJ7973YN6/N41LAr3t30QQ==" + "version": "3.7.6", + "resolved": "https://registry.npmjs.org/hotkeys-js/-/hotkeys-js-3.7.6.tgz", + "integrity": "sha512-X5d16trjp79o+OaCn7syXu0cs+TkLYlK/teE5FhpD1Cj9ROcEIhfIQ7Mhrk761ynF3NQLbLn5xRojP2UuSqDAw==" }, "hpack.js": { "version": "2.1.6", @@ -5187,9 +5387,9 @@ } }, "http-cache-semantics": { - "version": "4.0.3", - "resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.0.3.tgz", - "integrity": "sha512-TcIMG3qeVLgDr1TEd2XvHaTnMPwYQUQMIBLy+5pLSDKYFc7UIqj39w8EGzZkaxoLv/l2K8HaI0t5AVA+YYgUew==" + "version": "4.1.0", + "resolved": 
"https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.1.0.tgz", + "integrity": "sha512-carPklcUh7ROWRK7Cv27RPtdhYhUsela/ue5/jKzjegVvXDqM2ILE9Q2BGn9JZJh1g87cp56su/FgQSzcWS8cQ==" }, "http-deceiver": { "version": "1.2.7", @@ -5403,9 +5603,9 @@ "integrity": "sha1-+ni/XS5pE8kRzp+BnuUUa7bYROk=" }, "ipaddr.js": { - "version": "1.9.0", - "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.0.tgz", - "integrity": "sha512-M4Sjn6N/+O6/IXSJseKqHoFc+5FdGJ22sXqnjTpdZweHK64MzEPAyQZyEU3R/KRv2GLoa7nNtg/C2Ev6m7z+eA==" + "version": "1.9.1", + "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", + "integrity": "sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==" }, "is-absolute-url": { "version": "2.1.0", @@ -5571,9 +5771,9 @@ } }, "is-installed-globally": { - "version": "0.3.1", - "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.1.tgz", - "integrity": "sha512-oiEcGoQbGc+3/iijAijrK2qFpkNoNjsHOm/5V5iaeydyrS/hnwaRCEgH5cpW0P3T1lSjV5piB7S5b5lEugNLhg==", + "version": "0.3.2", + "resolved": "https://registry.npmjs.org/is-installed-globally/-/is-installed-globally-0.3.2.tgz", + "integrity": "sha512-wZ8x1js7Ia0kecP/CHM/3ABkAmujX7WPvQk6uu3Fly/Mk44pySulQpnHG46OMjHGXApINnV4QhY3SWnECO2z5g==", "requires": { "global-dirs": "^2.0.1", "is-path-inside": "^3.0.1" @@ -5610,9 +5810,9 @@ } }, "is-obj": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-1.0.1.tgz", - "integrity": "sha1-PkcprB9f3gJc19g6iW2rn09n2w8=" + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/is-obj/-/is-obj-2.0.0.tgz", + "integrity": "sha512-drqDG3cbczxxEJRoOXcOjtdp1J/lyp1mNn0xaznRs8+muBhgQcrnbspox5X5fOw0HnMnbfDzvnEMEtqDEJEo8w==" }, "is-path-cwd": { "version": "2.2.0", @@ -5732,10 +5932,10 @@ "resolved": "https://registry.npmjs.org/javascript-stringify/-/javascript-stringify-1.6.0.tgz", "integrity": "sha1-FC0RHzpuPa6PSpr9d9RYVbWpzOM=" }, - 
"js-levenshtein": { - "version": "1.1.6", - "resolved": "https://registry.npmjs.org/js-levenshtein/-/js-levenshtein-1.1.6.tgz", - "integrity": "sha512-X2BB11YZtrRqY4EnQcLX5Rh373zbK4alC1FW7D7MBhL2gtcC17cTnr6DmfHZeS0s2rTHjUTMMHfG7gO8SSdw+g==" + "js-base64": { + "version": "2.5.2", + "resolved": "https://registry.npmjs.org/js-base64/-/js-base64-2.5.2.tgz", + "integrity": "sha512-Vg8czh0Q7sFBSUMWWArX/miJeBWYBPpdU/3M/DKSaekLMqrqVPaedp+5mZhie/r0lgrcaYBfwXatEew6gwgiQQ==" }, "js-stringify": { "version": "1.0.2", @@ -5812,6 +6012,24 @@ "graceful-fs": "^4.1.6" } }, + "jsonp": { + "version": "0.2.1", + "resolved": "https://registry.npmjs.org/jsonp/-/jsonp-0.2.1.tgz", + "integrity": "sha1-pltPoPEL2nGaBUQep7lMVfPhW64=", + "requires": { + "debug": "^2.1.3" + }, + "dependencies": { + "debug": { + "version": "2.6.9", + "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", + "integrity": "sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==", + "requires": { + "ms": "2.0.0" + } + } + } + }, "jsprim": { "version": "1.4.1", "resolved": "https://registry.npmjs.org/jsprim/-/jsprim-1.4.1.tgz", @@ -5880,6 +6098,19 @@ "invert-kv": "^2.0.0" } }, + "leven": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", + "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==" + }, + "levenary": { + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/levenary/-/levenary-1.1.1.tgz", + "integrity": "sha512-mkAdOIt79FD6irqjYSs4rdbnlT5vRonMEvBVPVb3XmevfS8kgRXwfes0dhPdEtzTWD/1eNE/Bm/G1iRt6DcnQQ==", + "requires": { + "leven": "^3.1.0" + } + }, "linkify-it": { "version": "2.2.0", "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-2.2.0.tgz", @@ -5899,12 +6130,12 @@ "integrity": "sha512-Jsmr89RcXGIwivFY21FcRrisYZfvLMTWx5kOLc+JTxtpBOG6xML0vzbc6SEQG2FO9/4Fc3wW4LVcB5DmGflaRw==" }, "loader-utils": { - "version": "1.2.3", - "resolved": 
"https://registry.npmjs.org/loader-utils/-/loader-utils-1.2.3.tgz", - "integrity": "sha512-fkpz8ejdnEMG3s37wGL07iSBDg99O9D5yflE9RGNH3hRdx9SOwYfnGYdZOUIZitN8E+E2vkq3MUMYMvPYl5ZZA==", + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/loader-utils/-/loader-utils-1.4.0.tgz", + "integrity": "sha512-qH0WSMBtn/oHuwjy/NucEgbx5dbxxnxup9s4PVXJUDHZBQY+s0NWA9rJf53RBnQZxfch7euUui7hpoAPvALZdA==", "requires": { "big.js": "^5.2.2", - "emojis-list": "^2.0.0", + "emojis-list": "^3.0.0", "json5": "^1.0.1" } }, @@ -5985,9 +6216,9 @@ "integrity": "sha1-0CJTc662Uq3BvILklFM5qEJ1R3M=" }, "loglevel": { - "version": "1.6.6", - "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.6.tgz", - "integrity": "sha512-Sgr5lbboAUBo3eXCSPL4/KoVz3ROKquOjcctxmHIt+vol2DrqTQe3SwkKKuYhEiWB5kYa13YyopJ69deJ1irzQ==" + "version": "1.6.7", + "resolved": "https://registry.npmjs.org/loglevel/-/loglevel-1.6.7.tgz", + "integrity": "sha512-cY2eLFrQSAfVPhCgH1s7JI73tMbg9YC3v3+ZHVW67sBS7UxWzNEk/ZBbSfLykBWHp33dqqtOv82gjhKEi81T/A==" }, "longest": { "version": "1.0.1", @@ -6036,11 +6267,6 @@ } } }, - "mamacro": { - "version": "0.0.3", - "resolved": "https://registry.npmjs.org/mamacro/-/mamacro-0.0.3.tgz", - "integrity": "sha512-qMEwh+UujcQ+kbz3T6V+wAmO2U8veoq2w+3wY8MquqwVA3jChfwY+Tk52GZKDfACEPjuZ7r2oJLejwpt8jtwTA==" - }, "map-age-cleaner": { "version": "0.1.3", "resolved": "https://registry.npmjs.org/map-age-cleaner/-/map-age-cleaner-0.1.3.tgz", @@ -6075,9 +6301,9 @@ } }, "markdown-it-anchor": { - "version": "5.2.5", - "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.2.5.tgz", - "integrity": "sha512-xLIjLQmtym3QpoY9llBgApknl7pxAcN3WDRc2d3rwpl+/YvDZHPmKscGs+L6E05xf2KrCXPBvosWt7MZukwSpQ==" + "version": "5.2.7", + "resolved": "https://registry.npmjs.org/markdown-it-anchor/-/markdown-it-anchor-5.2.7.tgz", + "integrity": "sha512-REFmIaSS6szaD1bye80DMbp7ePwsPNvLTR5HunsUcZ0SG0rWJQ+Pz24R4UlTKtjKBPhxo0v0tOBDYjZQQknW8Q==" }, "markdown-it-attrs": { "version": "3.0.2", @@ 
-6307,6 +6533,18 @@ "normalize-url": "^2.0.1", "schema-utils": "^1.0.0", "webpack-sources": "^1.1.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "minimalistic-assert": { @@ -6328,9 +6566,9 @@ } }, "minimist": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.0.tgz", - "integrity": "sha1-o1AIsg9BOD7sH7kU9M1d95omQoQ=" + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.5.tgz", + "integrity": "sha512-FM9nNUYrRBAELZQT3xeZQ7fmMOBg6nWNmJKTcgsJeaLstP/UODVpGsr5OhXhhXg6f+qtJ8uiZ+PUxkDWcgIXLw==" }, "mississippi": { "version": "3.0.0", @@ -6369,18 +6607,11 @@ } }, "mkdirp": { - "version": "0.5.1", - "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.1.tgz", - "integrity": "sha1-MAV0OOrGz3+MR2fzhkjWaX11yQM=", + "version": "0.5.5", + "resolved": "https://registry.npmjs.org/mkdirp/-/mkdirp-0.5.5.tgz", + "integrity": "sha512-NKmAlESf6jMGym1++R0Ra7wvhV+wFW63FaSOFPwRahvea0gMUcGUhVeAg/0BC0wiv9ih5NYPB1Wn1UEI1/L+xQ==", "requires": { - "minimist": "0.0.8" - }, - "dependencies": { - "minimist": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-0.0.8.tgz", - "integrity": "sha1-hX/Kv8M5fSYluCKCYuhqp6ARsF0=" - } + "minimist": "^1.2.5" } }, "move-concurrently": { @@ -6553,12 +6784,9 @@ } }, "node-releases": { - "version": "1.1.48", - "resolved": "https://registry.npmjs.org/node-releases/-/node-releases-1.1.48.tgz", - "integrity": "sha512-Hr8BbmUl1ujAST0K0snItzEA5zkJTQup8VNTKNfT6Zw8vTJkIiagUPNfxHmgDOyfFYNfKAul40sD0UEYTvwebw==", - "requires": { - "semver": "^6.3.0" - } + "version": "1.1.53", + "resolved": 
"https://registry.npmjs.org/node-releases/-/node-releases-1.1.53.tgz", + "integrity": "sha512-wp8zyQVwef2hpZ/dJH7SfSrIPD6YoJz6BDQDpGEkcA0s3LpAQoxBIYmfIq6QAhC1DhwsyCgTaTTcONwX8qzCuQ==" }, "nopt": { "version": "1.0.10", @@ -7063,9 +7291,9 @@ } }, "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "requires": { "p-try": "^2.0.0" } @@ -7124,9 +7352,9 @@ "integrity": "sha1-AerA/jta9xoqbAL+q7jB/vfgDqs=" }, "postcss": { - "version": "7.0.26", - "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.26.tgz", - "integrity": "sha512-IY4oRjpXWYshuTDFxMVkJDtWIk2LhsTlu8bZnbEJA4+bYT16Lvpo8Qv6EvDumhYRgzjZl489pmsY3qVgJQ08nA==", + "version": "7.0.27", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-7.0.27.tgz", + "integrity": "sha512-WuQETPMcW9Uf1/22HWUWP9lgsIC+KEHg2kozMflKjbeUtw9ujvFX6QmIfozaErDkmLWS9WEnEdEe6Uo9/BNTdQ==", "requires": { "chalk": "^2.4.2", "source-map": "^0.6.1", @@ -7144,36 +7372,13 @@ } }, "postcss-calc": { - "version": "7.0.1", - "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.1.tgz", - "integrity": "sha512-oXqx0m6tb4N3JGdmeMSc/i91KppbYsFZKdH0xMOqK8V1rJlzrKlTdokz8ozUXLVejydRN6u2IddxpcijRj2FqQ==", + "version": "7.0.2", + "resolved": "https://registry.npmjs.org/postcss-calc/-/postcss-calc-7.0.2.tgz", + "integrity": "sha512-rofZFHUg6ZIrvRwPeFktv06GdbDYLcGqh9EwiMutZg+a0oePCCw1zHOEiji6LCpyRcjTREtPASuUqeAvYlEVvQ==", "requires": { - "css-unit-converter": "^1.1.1", - "postcss": "^7.0.5", - "postcss-selector-parser": "^5.0.0-rc.4", - "postcss-value-parser": "^3.3.1" - }, - "dependencies": { - "cssesc": { - "version": "2.0.0", - "resolved": 
"https://registry.npmjs.org/cssesc/-/cssesc-2.0.0.tgz", - "integrity": "sha512-MsCAG1z9lPdoO/IUMLSBWBSVxVtJ1395VGIQ+Fc2gNdkQ1hNDnQdw3YhA71WJCBW1vdwA0cAnk/DnW6bqoEUYg==" - }, - "postcss-selector-parser": { - "version": "5.0.0", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-5.0.0.tgz", - "integrity": "sha512-w+zLE5Jhg6Liz8+rQOWEAwtwkyqpfnmsinXjXg6cY7YIONZZtgvE0v2O0uhQBs0peNomOJwWRKt6JBfTdTd3OQ==", - "requires": { - "cssesc": "^2.0.0", - "indexes-of": "^1.0.1", - "uniq": "^1.0.1" - } - }, - "postcss-value-parser": { - "version": "3.3.1", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-3.3.1.tgz", - "integrity": "sha512-pISE66AbVkp4fDQ7VHBwRNXzAAKJjw4Vw7nWI/+Q3vuly7SNfgYXvm6i5IgFylHGK5sP/xHAbB7N49OS4gWNyQ==" - } + "postcss": "^7.0.27", + "postcss-selector-parser": "^6.0.2", + "postcss-value-parser": "^4.0.2" } }, "postcss-colormin": { @@ -7261,6 +7466,18 @@ "postcss": "^7.0.0", "postcss-load-config": "^2.0.0", "schema-utils": "^1.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "postcss-merge-longhand": { @@ -7295,11 +7512,11 @@ }, "dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", 
"uniq": "^1.0.1" } @@ -7372,11 +7589,11 @@ }, "dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": "sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", "uniq": "^1.0.1" } @@ -7409,9 +7626,9 @@ } }, "postcss-modules-scope": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.1.1.tgz", - "integrity": "sha512-OXRUPecnHCg8b9xWvldG/jUpRIGPNRka0r4D4j0ESUU2/5IOnpsjfPPmDprM3Ih8CgZ8FXjWqaniK5v4rWt3oQ==", + "version": "2.2.0", + "resolved": "https://registry.npmjs.org/postcss-modules-scope/-/postcss-modules-scope-2.2.0.tgz", + "integrity": "sha512-YyEgsTMRpNd+HmyC7H/mh3y+MeFWevy7V1evVhJWewmMbjDHIbZbOXICC2y+m1xI1UVfIT1HMW/O04Hxyu9oXQ==", "requires": { "postcss": "^7.0.6", "postcss-selector-parser": "^6.0.0" @@ -7670,9 +7887,9 @@ } }, "postcss-value-parser": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.2.tgz", - "integrity": "sha512-LmeoohTpp/K4UiyQCwuGWlONxXamGzCMtFxLq4W1nZVGIQLYvMCJx3yAF9qyyuFpflABI9yVdtJAqbihOsCsJQ==" + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/postcss-value-parser/-/postcss-value-parser-4.0.3.tgz", + "integrity": "sha512-N7h4pG+Nnu5BEIzyeaaIYWs0LI5XC40OrRh5L60z0QjFsqGWcHcbkBvpe1WYpcIS9yQ8sOi/vIPt1ejQCrMVrg==" }, "prepend-http": { "version": "2.0.0", @@ -7682,7 +7899,8 @@ "prettier": { "version": "1.19.1", "resolved": "https://registry.npmjs.org/prettier/-/prettier-1.19.1.tgz", - "integrity": 
"sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==" + "integrity": "sha512-s7PoyDv/II1ObgQunCbB9PdLmUcBZcnWOcxDh7O0N/UwDEsHyqkW+Qh28jW+mVuCdx7gLB0BotYI1Y6uI9iyew==", + "optional": true }, "pretty-error": { "version": "2.1.1", @@ -7699,9 +7917,9 @@ "integrity": "sha512-28iF6xPQrP8Oa6uxE6a1biz+lWeTOAPKggvjB8HAs6nVMKZwf5bG++632Dx614hIWgUPkgivRfG+a8uAXGTIbA==" }, "prismjs": { - "version": "1.19.0", - "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.19.0.tgz", - "integrity": "sha512-IVFtbW9mCWm9eOIaEkNyo2Vl4NnEifis2GQ7/MLRG5TQe6t+4Sj9J5QWI9i3v+SS43uZBlCAOn+zYTVYQcPXJw==", + "version": "1.20.0", + "resolved": "https://registry.npmjs.org/prismjs/-/prismjs-1.20.0.tgz", + "integrity": "sha512-AEDjSrVNkynnw6A+B1DsFkd6AVdTnp+/WoUixFRULlCLZVRZlVQMVWio/16jv7G1FscUxQxOQhWwApgbnxr6kQ==", "requires": { "clipboard": "^2.0.0" } @@ -7735,12 +7953,12 @@ "integrity": "sha1-mEcocL8igTL8vdhoEputEsPAKeM=" }, "proxy-addr": { - "version": "2.0.5", - "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.5.tgz", - "integrity": "sha512-t/7RxHXPH6cJtP0pRG6smSr9QJidhB+3kXu0KgXnbGYMgzEnUxRQ4/LDdfOwZEMyIh3/xHb8PX3t+lfL9z+YVQ==", + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.6.tgz", + "integrity": "sha512-dh/frvCBVmSsDYzw6n926jv974gddhkFPfiN8hPOi30Wax25QZyZEGveluCgliBnqmuM+UJmBErbAUFIoDbjOw==", "requires": { "forwarded": "~0.1.2", - "ipaddr.js": "1.9.0" + "ipaddr.js": "1.9.1" } }, "prr": { @@ -7754,9 +7972,9 @@ "integrity": "sha1-8FKijacOYYkX7wqKw0wa5aaChrM=" }, "psl": { - "version": "1.7.0", - "resolved": "https://registry.npmjs.org/psl/-/psl-1.7.0.tgz", - "integrity": "sha512-5NsSEDv8zY70ScRnOTn7bK7eanl2MvFrOrS/R6x+dBt5g1ghnj9Zv90kO8GwT8gxcu2ANyFprnFYB85IogIJOQ==" + "version": "1.8.0", + "resolved": "https://registry.npmjs.org/psl/-/psl-1.8.0.tgz", + "integrity": "sha512-RIdOzyoavK+hA18OGGWDqUTsCLhtA7IcZ/6NCs4fFJaHBDab+pDDmDIByWFRQJq2Cd7r1OoQxBGKOaztq+hjIQ==" }, "public-encrypt": { 
"version": "4.0.3", @@ -7928,6 +8146,14 @@ "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.1.1.tgz", "integrity": "sha512-XRsRjdf+j5ml+y/6GKHPZbrF/8p2Yga0JPtdqTIY2Xe5ohJPD9saDJJLPvp9+NSBprVvevdXZybnj2cv8OEd0A==" }, + "pupa": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/pupa/-/pupa-2.0.1.tgz", + "integrity": "sha512-hEJH0s8PXLY/cdXh66tNEQGndDrIKNqNC5xmrysZy3i5C3oEoLna7YAOad+7u125+zH1HNXUmGEkrhb3c2VriA==", + "requires": { + "escape-goat": "^2.0.0" + } + }, "q": { "version": "1.5.1", "resolved": "https://registry.npmjs.org/q/-/q-1.5.1.tgz", @@ -8015,9 +8241,9 @@ } }, "readable-stream": { - "version": "3.5.0", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.5.0.tgz", - "integrity": "sha512-gSz026xs2LfxBPudDuI41V1lka8cxg64E66SGe78zJlsUofOg/yqwezdIcdfwik6B4h8LFmWPA9ef9X3FiNFLA==", + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.0.tgz", + "integrity": "sha512-BViHy7LKeTz4oNnkcLJ+lVSL6vpiFeX6/d3oSH8zCW7UxP2onchk+vTGB143xuFjHS3deTgkKoXXymXqymiIdA==", "requires": { "inherits": "^2.0.3", "string_decoder": "^1.1.1", @@ -8077,9 +8303,9 @@ "integrity": "sha512-1G6jJVDWrt0rK99kBjvEtziZNCICAuvIPkSiUFIQxVP06RCVpq3dmDo2oi6ABpYaDYaTRr67BEhL8r1wgEZZKg==" }, "regenerate-unicode-properties": { - "version": "8.1.0", - "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.1.0.tgz", - "integrity": "sha512-LGZzkgtLY79GeXLm8Dp0BVLdQlWICzBnJz/ipWUgo59qBaZ+BHtq51P2q1uVZlppMuUAT37SDk39qUbjTWB7bA==", + "version": "8.2.0", + "resolved": "https://registry.npmjs.org/regenerate-unicode-properties/-/regenerate-unicode-properties-8.2.0.tgz", + "integrity": "sha512-F9DjY1vKLo/tPePDycuH3dn9H1OTPIkVD9Kz4LODu+F2C75mgjAJ7x/gwy6ZcSNRAAkhNlJSOHRe8k3p+K9WhA==", "requires": { "regenerate": "^1.4.0" } @@ -8090,11 +8316,12 @@ "integrity": "sha512-MguG95oij0fC3QV3URf4V2SDYGJhJnJGqvIIgdECeODCT98wSWDAJ94SSuVpYQUoTcGUIL6L4yNB7j1DFFHSBg==" }, 
"regenerator-transform": { - "version": "0.14.1", - "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.1.tgz", - "integrity": "sha512-flVuee02C3FKRISbxhXl9mGzdbWUVHubl1SMaknjxkFB1/iqpJhArQUvRxOOPEc/9tAiX0BaQ28FJH10E4isSQ==", + "version": "0.14.4", + "resolved": "https://registry.npmjs.org/regenerator-transform/-/regenerator-transform-0.14.4.tgz", + "integrity": "sha512-EaJaKPBI9GvKpvUz2mz4fhx7WPgvwRLY9v3hlNHWmAuJHI13T4nwKnNvm5RWJzEdnI5g5UwtOww+S8IdoUC2bw==", "requires": { - "private": "^0.1.6" + "@babel/runtime": "^7.8.4", + "private": "^0.1.8" } }, "regex-not": { @@ -8135,16 +8362,16 @@ } }, "regexpu-core": { - "version": "4.6.0", - "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.6.0.tgz", - "integrity": "sha512-YlVaefl8P5BnFYOITTNzDvan1ulLOiXJzCNZxduTIosN17b87h3bvG9yHMoHaRuo88H4mQ06Aodj5VtYGGGiTg==", + "version": "4.7.0", + "resolved": "https://registry.npmjs.org/regexpu-core/-/regexpu-core-4.7.0.tgz", + "integrity": "sha512-TQ4KXRnIn6tz6tjnrXEkD/sshygKH/j5KzK86X8MkeHyZ8qst/LZ89j3X4/8HEIfHANTFIP/AbXakeRhWIl5YQ==", "requires": { "regenerate": "^1.4.0", - "regenerate-unicode-properties": "^8.1.0", - "regjsgen": "^0.5.0", - "regjsparser": "^0.6.0", + "regenerate-unicode-properties": "^8.2.0", + "regjsgen": "^0.5.1", + "regjsparser": "^0.6.4", "unicode-match-property-ecmascript": "^1.0.4", - "unicode-match-property-value-ecmascript": "^1.1.0" + "unicode-match-property-value-ecmascript": "^1.2.0" } }, "registry-auth-token": { @@ -8169,9 +8396,9 @@ "integrity": "sha512-5qxzGZjDs9w4tzT3TPhCJqWdCc3RLYwy9J2NB0nm5Lz+S273lvWcpjaTGHsT1dc6Hhfq41uSEOw8wBmxrKOuyg==" }, "regjsparser": { - "version": "0.6.2", - "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.2.tgz", - "integrity": "sha512-E9ghzUtoLwDekPT0DYCp+c4h+bvuUpe6rRHCTYn6eGoqj1LgKXxT6I0Il4WbjhQkOghzi/V+y03bPKvbllL93Q==", + "version": "0.6.4", + "resolved": "https://registry.npmjs.org/regjsparser/-/regjsparser-0.6.4.tgz", + "integrity": 
"sha512-64O87/dPDgfk8/RQqC4gkZoGyyWFIEUTTh80CU6CWuK5vkCGyekIx+oKcEIYtP/RAxSQltCZHCNu/mdd7fqlJw==", "requires": { "jsesc": "~0.5.0" }, @@ -8216,9 +8443,9 @@ "integrity": "sha1-jcrkcOHIirwtYA//Sndihtp15jc=" }, "request": { - "version": "2.88.0", - "resolved": "https://registry.npmjs.org/request/-/request-2.88.0.tgz", - "integrity": "sha512-NAqBSrijGLZdM0WZNsInLJpkJokL72XYjUpnB0iwsRgxh7dB6COrHnTBNwN0E+lHDAJzu7kLAkDeY08z2/A0hg==", + "version": "2.88.2", + "resolved": "https://registry.npmjs.org/request/-/request-2.88.2.tgz", + "integrity": "sha512-MsvtOrfG9ZcrOwAW+Qi+F6HbD0CWXEh9ou77uOb7FM2WPhwT7smM833PzanhJLsgXjN89Ir6V2PczXNnMpwKhw==", "requires": { "aws-sign2": "~0.7.0", "aws4": "^1.8.0", @@ -8227,7 +8454,7 @@ "extend": "~3.0.2", "forever-agent": "~0.6.1", "form-data": "~2.3.2", - "har-validator": "~5.1.0", + "har-validator": "~5.1.3", "http-signature": "~1.2.0", "is-typedarray": "~1.0.0", "isstream": "~0.1.2", @@ -8237,7 +8464,7 @@ "performance-now": "^2.1.0", "qs": "~6.5.2", "safe-buffer": "^5.1.2", - "tough-cookie": "~2.4.3", + "tough-cookie": "~2.5.0", "tunnel-agent": "^0.6.0", "uuid": "^3.3.2" }, @@ -8264,11 +8491,6 @@ "resolved": "https://registry.npmjs.org/requires-port/-/requires-port-1.0.0.tgz", "integrity": "sha1-kl0mAdOaxIXgkc8NpcbmlNw9yv8=" }, - "reselect": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/reselect/-/reselect-3.0.1.tgz", - "integrity": "sha1-79qpjqdFEyTQkrKyFjpqHXqaIUc=" - }, "resolve": { "version": "1.15.1", "resolved": "https://registry.npmjs.org/resolve/-/resolve-1.15.1.tgz", @@ -8380,13 +8602,12 @@ "integrity": "sha512-NqVDv9TpANUjFm0N8uM5GxL36UgKi9/atZw+x7YFnQ8ckwFGKrl4xX4yWtrey3UJm5nP1kUbnYgLopqWNSRhWw==" }, "schema-utils": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", - "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "version": "2.6.5", + "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-2.6.5.tgz", + "integrity": "sha512-5KXuwKziQrTVHh8j/Uxz+QUbxkaLW9X/86NBlx/gnKgtsZA2GIVMUn17qWhRFwF8jdYb3Dig5hRO/W5mZqy6SQ==", "requires": { - "ajv": "^6.1.0", - "ajv-errors": "^1.0.0", - "ajv-keywords": "^3.1.0" + "ajv": "^6.12.0", + "ajv-keywords": "^3.4.1" } }, "section-matter": { @@ -8587,9 +8808,9 @@ "integrity": "sha1-2kL0l0DAtC2yypcoVxyxkMmO/qM=" }, "signal-exit": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.2.tgz", - "integrity": "sha1-tf3AjxKH6hF4Yo5BXiUTK3NkbG0=" + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/signal-exit/-/signal-exit-3.0.3.tgz", + "integrity": "sha512-VUJ49FC8U1OxwZLxIbTTrDvLnf/6TDgxZcK8wxR8zs13xpx7xbG60ndBlhNrFi2EMuFRoeDoJO7wthSLq42EjA==" }, "simple-swizzle": { "version": "0.2.2", @@ -8819,9 +9040,9 @@ "integrity": "sha1-PpNdfd1zYxuXZZlW1VEo6HtQhKM=" }, "spdy": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.1.tgz", - "integrity": "sha512-HeZS3PBdMA+sZSu0qwpCxl3DeALD5ASx8pAX0jZdKXSpPWbQ6SYGnlg3BBmYLx5LtiZrmkAZfErCm2oECBcioA==", + "version": "4.0.2", + "resolved": "https://registry.npmjs.org/spdy/-/spdy-4.0.2.tgz", + "integrity": "sha512-r46gZQZQV+Kl9oItvl1JZZqJKGr+oEkB08A6BzkiR7593/7IbtuncXHd2YoYeTsG4157ZssMu9KYvUHLcjcDoA==", "requires": { "debug": "^4.1.0", "handle-thing": "^2.0.0", @@ -9093,22 +9314,42 @@ } } }, + "string.prototype.trimend": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.trimend/-/string.prototype.trimend-1.0.0.tgz", + "integrity": "sha512-EEJnGqa/xNfIg05SxiPSqRS7S9qwDhYts1TSLR1BQfYUfPe1stofgGKvwERK9+9yf+PpfBMlpBaCHucXGPQfUA==", + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" + } + }, "string.prototype.trimleft": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.1.tgz", - "integrity": 
"sha512-iu2AGd3PuP5Rp7x2kEZCrB2Nf41ehzh+goo8TV7z8/XDBbsvc6HQIlUl9RjkZ4oyrW1XM5UwlGl1oVEaDjg6Ag==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/string.prototype.trimleft/-/string.prototype.trimleft-2.1.2.tgz", + "integrity": "sha512-gCA0tza1JBvqr3bfAIFJGqfdRTyPae82+KTnm3coDXkZN9wnuW3HjGgN386D7hfv5CHQYCI022/rJPVlqXyHSw==", "requires": { "define-properties": "^1.1.3", - "function-bind": "^1.1.1" + "es-abstract": "^1.17.5", + "string.prototype.trimstart": "^1.0.0" } }, "string.prototype.trimright": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.1.tgz", - "integrity": "sha512-qFvWL3/+QIgZXVmJBfpHmxLB7xsUXz6HsUmP8+5dRaC3Q7oKUv9Vo6aMCRZC1smrtyECFsIT30PqBJ1gTjAs+g==", + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/string.prototype.trimright/-/string.prototype.trimright-2.1.2.tgz", + "integrity": "sha512-ZNRQ7sY3KroTaYjRS6EbNiiHrOkjihL9aQE/8gfQ4DtAC/aEBRHFJa44OmoWxGGqXuJlfKkZW4WcXErGr+9ZFg==", "requires": { "define-properties": "^1.1.3", - "function-bind": "^1.1.1" + "es-abstract": "^1.17.5", + "string.prototype.trimend": "^1.0.0" + } + }, + "string.prototype.trimstart": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/string.prototype.trimstart/-/string.prototype.trimstart-1.0.0.tgz", + "integrity": "sha512-iCP8g01NFYiiBOnwG1Xc3WZLyoo+RuBymwIlWncShXDDJYWN6DbnM3odslBJdgCdRlq94B5s63NWAZlcn2CS4w==", + "requires": { + "define-properties": "^1.1.3", + "es-abstract": "^1.17.5" } }, "string_decoder": { @@ -9153,11 +9394,11 @@ }, "dependencies": { "postcss-selector-parser": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.1.tgz", - "integrity": "sha1-T4dfSvsMllc9XPTXQBGu4lCn6GU=", + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-3.1.2.tgz", + "integrity": 
"sha512-h7fJ/5uWuRVyOtkO45pnt1Ih40CEleeyCHzipqAZO2e5H20g25Y48uYnFUiShvY4rZWNJ/Bib/KVPmanaCtOhA==", "requires": { - "dot-prop": "^4.1.1", + "dot-prop": "^5.2.0", "indexes-of": "^1.0.1", "uniq": "^1.0.1" } @@ -9267,9 +9508,9 @@ "integrity": "sha512-a6sumDlzyHVJWb8+YofY4TW112G6p2FCPEAFk+59gIYHv3XHRhm9ltVQ9kli4hNWeQBwSpe8cRN25x0ROunMOw==" }, "terser": { - "version": "4.6.3", - "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.3.tgz", - "integrity": "sha512-Lw+ieAXmY69d09IIc/yqeBqXpEQIpDGZqT34ui1QWXIUpR2RjbqEkT8X7Lgex19hslSqcWM5iMN2kM11eMsESQ==", + "version": "4.6.11", + "resolved": "https://registry.npmjs.org/terser/-/terser-4.6.11.tgz", + "integrity": "sha512-76Ynm7OXUG5xhOpblhytE7X58oeNSmC8xnNhjWVo8CksHit0U0kO4hfNbPrrYwowLWFgM2n9L176VNx2QaHmtA==", "requires": { "commander": "^2.20.0", "source-map": "~0.6.1", @@ -9297,6 +9538,18 @@ "terser": "^4.1.2", "webpack-sources": "^1.4.0", "worker-farm": "^1.7.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "text-table": { @@ -9366,9 +9619,9 @@ "integrity": "sha1-QFQRqOfmM5/mTbmiNN4R3DHgK9Q=" }, "tiny-cookie": { - "version": "2.3.1", - "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.1.tgz", - "integrity": "sha512-C4x1e8dHfKf03ewuN9aIZzzOfN2a6QKhYlnHdzJxmmjMTLqcskI20F+EplszjODQ4SHmIGFJrvUUnBMS/bJbOA==" + "version": "2.3.2", + "resolved": "https://registry.npmjs.org/tiny-cookie/-/tiny-cookie-2.3.2.tgz", + "integrity": "sha512-qbymkVh+6+Gc/c9sqnvbG+dOHH6bschjphK3SHgIfT6h/t+63GBL37JXNoXEc6u/+BcwU6XmaWUuf19ouLVtPg==" }, "tiny-emitter": { "version": "2.1.0", @@ -9474,19 +9727,12 @@ "integrity": "sha1-LmhELZ9k7HILjMieZEOsbKqVACk=" }, "tough-cookie": { - "version": "2.4.3", - "resolved": 
"https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.4.3.tgz", - "integrity": "sha512-Q5srk/4vDM54WJsJio3XNn6K2sCG+CQ8G5Wz6bZhRZoAe/+TxjWB/GlFAnYEbkYVlON9FMk/fE3h2RLpPXo4lQ==", + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/tough-cookie/-/tough-cookie-2.5.0.tgz", + "integrity": "sha512-nlLsUzgm1kfLXSXfRZMc1KLAugd4hqJHDTvc2hDIwS3mZAfMEuMbc03SujMF+GEcpaX/qboeycw6iO8JwVv2+g==", "requires": { - "psl": "^1.1.24", - "punycode": "^1.4.1" - }, - "dependencies": { - "punycode": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/punycode/-/punycode-1.4.1.tgz", - "integrity": "sha1-wNWmOycYgArY4esPpSachN1BhF4=" - } + "psl": "^1.1.28", + "punycode": "^2.1.1" } }, "tr46": { @@ -9498,9 +9744,9 @@ } }, "tslib": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.10.0.tgz", - "integrity": "sha512-qOebF53frne81cf0S9B41ByenJ3/IuH8yJKngAX35CmiZySA0khhkovshKK+jGCaMnVomla7gVlIcc3EvKPbTQ==" + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-1.11.1.tgz", + "integrity": "sha512-aZW88SY8kQbU7gpV19lN24LtXh/yD4ZZg6qieAJDDg+YBsJcSmLGK9QpnUjAKVG/xefmvJGd1WUmfpT/g6AJGA==" }, "tty-browserify": { "version": "0.0.0", @@ -9521,9 +9767,9 @@ "integrity": "sha1-WuaBd/GS1EViadEIr6k/+HQ/T2Q=" }, "type-fest": { - "version": "0.8.1", - "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.8.1.tgz", - "integrity": "sha512-4dbzIzqvjtgiM5rw1k5rEHtBANKmdudhGyBEajN01fEyhaAIhsoKNy6y7+IN93IfpFtwY9iqi7kD+xwKhQsNJA==" + "version": "0.11.0", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.11.0.tgz", + "integrity": "sha512-OdjXJxnCN1AvyLSzeKIgXTXxV+99ZuXl3Hpo9XpJAv9MBcHrrJOQ5kV7ypXOuQie+AmWG25hLbiKdwYTifzcfQ==" }, "type-is": { "version": "1.6.18", @@ -9590,14 +9836,14 @@ } }, "unicode-match-property-value-ecmascript": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.1.0.tgz", - "integrity": 
"sha512-hDTHvaBk3RmFzvSl0UVrUmC3PuW9wKVnpoUDYH0JDkSIovzw+J5viQmeYHxVSBptubnr7PbH2e0fnpDRQnQl5g==" + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/unicode-match-property-value-ecmascript/-/unicode-match-property-value-ecmascript-1.2.0.tgz", + "integrity": "sha512-wjuQHGQVofmSJv1uVISKLE5zO2rNGzM/KCYZch/QQvez7C1hUhBIuZ701fYXExuufJFMPhv2SyL8CyoIfMLbIQ==" }, "unicode-property-aliases-ecmascript": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.0.5.tgz", - "integrity": "sha512-L5RAqCfXqAwR3RriF8pM0lU0w4Ryf/GgzONwi6KnL1taJQa7x1TCxdJnILX59WIGOwR57IVxn7Nej0fz1Ny6fw==" + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/unicode-property-aliases-ecmascript/-/unicode-property-aliases-ecmascript-1.1.0.tgz", + "integrity": "sha512-PqSoPh/pWetQ2phoj5RLiaqIk4kCNwoV3CI+LfGmWLKI3rE3kl1h59XpX2BjgDrmbxD9ARtQobPGU1SguCYuQg==" }, "union-value": { "version": "1.0.1", @@ -9701,13 +9947,13 @@ "integrity": "sha512-aZwGpamFO61g3OlfT7OQCHqhGnW43ieH9WZeP7QxN/G/jS4jfqUkZxoryvJgVPEcrl5NL/ggHsSmLMHuH64Lhg==" }, "update-notifier": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.0.0.tgz", - "integrity": "sha512-p9zf71hWt5GVXM4iEBujpUgx8mK9AWiCCapEJm/O1z5ntCim83Z1ATqzZFBHFYqx03laMqv8LiDgs/7ikXjf/g==", + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/update-notifier/-/update-notifier-4.1.0.tgz", + "integrity": "sha512-w3doE1qtI0/ZmgeoDoARmI5fjDoT93IfKgEGqm26dGUOh8oNpaSTsGNdYRN/SjOuo10jcJGwkEL3mroKzktkew==", "requires": { "boxen": "^4.2.0", "chalk": "^3.0.0", - "configstore": "^5.0.0", + "configstore": "^5.0.1", "has-yarn": "^2.1.0", "import-lazy": "^2.1.0", "is-ci": "^2.0.0", @@ -9715,6 +9961,7 @@ "is-npm": "^4.0.0", "is-yarn-global": "^0.3.0", "latest-version": "^5.0.0", + "pupa": "^2.0.1", "semver-diff": "^3.1.1", "xdg-basedir": "^4.0.0" }, @@ -9807,6 +10054,18 @@ "loader-utils": "^1.1.0", "mime": "^2.0.3", 
"schema-utils": "^1.0.0" + }, + "dependencies": { + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + } } }, "url-parse": { @@ -9923,9 +10182,9 @@ "integrity": "sha512-BXq3jwIagosjgNVae6tkHzzIk6a8MHFtzAdwhnV5VlvPTFxDCvIttgSiHWjdGoTJvXtmRu5HacExfdarRcFhog==" }, "vue-loader": { - "version": "15.8.3", - "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.8.3.tgz", - "integrity": "sha512-yFksTFbhp+lxlm92DrKdpVIWMpranXnTEuGSc0oW+Gk43M9LWaAmBTnfj5+FCdve715mTHvo78IdaXf5TbiTJg==", + "version": "15.9.1", + "resolved": "https://registry.npmjs.org/vue-loader/-/vue-loader-15.9.1.tgz", + "integrity": "sha512-IaPU2KOPjs/QjMlxFs/TiTtQUSbftQ7lsAvoxe21rtcQohsMhx+1AltXCNhZIpIn46PtODiAgz+o8RbMpKtmJw==", "requires": { "@vue/component-compiler-utils": "^3.1.0", "hash-sum": "^1.0.2", @@ -9935,9 +10194,9 @@ } }, "vue-router": { - "version": "3.1.5", - "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.1.5.tgz", - "integrity": "sha512-BszkPvhl7I9h334GjckCh7sVFyjTPMMJFJ4Bsrem/Ik+B/9gt5tgrk8k4gGLO4ZpdvciVdg7O41gW4DisQWurg==" + "version": "3.1.6", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-3.1.6.tgz", + "integrity": "sha512-GYhn2ynaZlysZMkFE5oCHRUTqE8BWs/a9YbKpNLi0i7xD6KG1EzDqpHQmv1F5gXjr8kL5iIVS8EOtRaVUEXTqA==" }, "vue-server-renderer": { "version": "2.6.11", @@ -10007,13 +10266,13 @@ "integrity": "sha512-4gDntzrifFnCEvyoO8PqyJDmguXgVPxKiIxrBKjIowvL9l+N66196+72XVYR8BBf1Uv1Fgt3bGevJ+sEmxfZzw==" }, "vuepress": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/vuepress/-/vuepress-1.3.0.tgz", - "integrity": "sha512-TmPmHiT70aq4xqy4XczUJmUdpGlMSheOGGVwA2nhYSIS9IEd4ngPbfT9oEcAFTsGHXsr5KH8EgEU7G+3wWzY/A==", + "version": "1.4.0", + "resolved": 
"https://registry.npmjs.org/vuepress/-/vuepress-1.4.0.tgz", + "integrity": "sha512-VrBNCCjyrB4EfdIRWTW6uo/xmMzplVsGE/2oGLkgVhWLPCvvSEAcGQhoUKWxRJXk6CdrDCov6jsmu6MA1N3fvw==", "requires": { - "@vuepress/core": "^1.3.0", - "@vuepress/theme-default": "^1.3.0", - "cac": "^6.5.5", + "@vuepress/core": "^1.4.0", + "@vuepress/theme-default": "^1.4.0", + "cac": "^6.5.6", "envinfo": "^7.2.0", "opencollective-postinstall": "^2.0.2", "update-notifier": "^4.0.0" @@ -10038,6 +10297,11 @@ "resolved": "https://registry.npmjs.org/big.js/-/big.js-3.2.0.tgz", "integrity": "sha512-+hN/Zh2D08Mx65pZ/4g5bsmNiZUuChDiQfTUQ7qJr4/kuopCr88xZsAXv6mBoZEsUI4OuGHlX59qE94K2mMW8Q==" }, + "emojis-list": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/emojis-list/-/emojis-list-2.1.0.tgz", + "integrity": "sha1-TapNnbAPmBmIDHn6RXrlsJof04k=" + }, "json5": { "version": "0.5.1", "resolved": "https://registry.npmjs.org/json5/-/json5-0.5.1.tgz", @@ -10066,9 +10330,9 @@ } }, "vuepress-plugin-container": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.2.tgz", - "integrity": "sha512-Df5KoIDMYiFg45GTfFw2hIiLGSsjhms4f3ppl2UIBf5nWMxi2lfifcoo8MooMSfxboxRZjoDccqQfu0fypaKrQ==", + "version": "2.1.3", + "resolved": "https://registry.npmjs.org/vuepress-plugin-container/-/vuepress-plugin-container-2.1.3.tgz", + "integrity": "sha512-5bTtt8PKu9edNoc2Op/sRhCynjT+xKO/VuqwH7ftjdwNZUZMl/ymga7L+5lXCWNOLYAzRHaZAyYV5tY/97cl5g==", "requires": { "markdown-it-container": "^2.0.0" } @@ -10090,11 +10354,12 @@ } }, "vuepress-theme-cosmos": { - "version": "1.0.150", - "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.150.tgz", - "integrity": "sha512-f4McVndkB+CqJ6mWpOG4UZSR14LJyXqwcgwoDoDUx149g2PKU3qI/AF5AcrM25+4UKMCXFKcJloQCl/aWq+1ig==", + "version": "1.0.161", + "resolved": "https://registry.npmjs.org/vuepress-theme-cosmos/-/vuepress-theme-cosmos-1.0.161.tgz", + "integrity": 
"sha512-eKcjz6IYEw4gYH57orf8H0qSd34+40R+Sw71gdwMkNphJRdMTK4hy7uwrjSmK0McpBRK7tEEZYZLR+EGeMIDNg==", "requires": { - "@cosmos-ui/vue": "^0.5.20", + "@cosmos-ui/vue": "^0.22.0", + "@vuepress/plugin-google-analytics": "^1.3.1", "axios": "^0.19.0", "cheerio": "^1.0.0-rc.3", "clipboard-copy": "^3.1.0", @@ -10102,6 +10367,7 @@ "fuse.js": "^3.4.6", "gray-matter": "^4.0.2", "hotkeys-js": "^3.7.3", + "jsonp": "^0.2.1", "markdown-it": "^10.0.0", "markdown-it-attrs": "^3.0.1", "prismjs": "^1.17.1", @@ -10115,11 +10381,11 @@ } }, "watchpack": { - "version": "1.6.0", - "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.0.tgz", - "integrity": "sha512-i6dHe3EyLjMmDlU1/bGQpEw25XSjkJULPuAVKCbNRefQVq48yXKUpwg538F7AZTf9kyr57zj++pQFltUa5H7yA==", + "version": "1.6.1", + "resolved": "https://registry.npmjs.org/watchpack/-/watchpack-1.6.1.tgz", + "integrity": "sha512-+IF9hfUFOrYOOaKyfaI7h7dquUIOgyEMoQMLA7OP5FxegKA2+XdXThAZ9TU2kucfhDH7rfMHs1oPYziVGWRnZA==", "requires": { - "chokidar": "^2.0.2", + "chokidar": "^2.1.8", "graceful-fs": "^4.1.2", "neo-async": "^2.5.0" } @@ -10138,14 +10404,14 @@ "integrity": "sha512-YQ+BmxuTgd6UXZW3+ICGfyqRyHXVlD5GtQr5+qjiNW7bF0cqrzX500HVXPBOvgXb5YnzDd+h0zqyv61KUD7+Sg==" }, "webpack": { - "version": "4.41.5", - "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.41.5.tgz", - "integrity": "sha512-wp0Co4vpyumnp3KlkmpM5LWuzvZYayDwM2n17EHFr4qxBBbRokC7DJawPJC7TfSFZ9HZ6GsdH40EBj4UV0nmpw==", - "requires": { - "@webassemblyjs/ast": "1.8.5", - "@webassemblyjs/helper-module-context": "1.8.5", - "@webassemblyjs/wasm-edit": "1.8.5", - "@webassemblyjs/wasm-parser": "1.8.5", + "version": "4.42.1", + "resolved": "https://registry.npmjs.org/webpack/-/webpack-4.42.1.tgz", + "integrity": "sha512-SGfYMigqEfdGchGhFFJ9KyRpQKnipvEvjc1TwrXEPCM6H5Wywu10ka8o3KGrMzSMxMQKt8aCHUFh5DaQ9UmyRg==", + "requires": { + "@webassemblyjs/ast": "1.9.0", + "@webassemblyjs/helper-module-context": "1.9.0", + "@webassemblyjs/wasm-edit": "1.9.0", + "@webassemblyjs/wasm-parser": 
"1.9.0", "acorn": "^6.2.1", "ajv": "^6.10.2", "ajv-keywords": "^3.4.1", @@ -10157,7 +10423,7 @@ "loader-utils": "^1.2.3", "memory-fs": "^0.4.1", "micromatch": "^3.1.10", - "mkdirp": "^0.5.1", + "mkdirp": "^0.5.3", "neo-async": "^2.6.1", "node-libs-browser": "^2.2.1", "schema-utils": "^1.0.0", @@ -10168,9 +10434,19 @@ }, "dependencies": { "acorn": { - "version": "6.4.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.0.tgz", - "integrity": "sha512-gac8OEcQ2Li1dxIEWGZzsp2BitJxwkwcOm0zHAJLcPJaVvm58FRnk6RkuLRpU1EujipU2ZFODv2P9DLMfnV8mw==" + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-6.4.1.tgz", + "integrity": "sha512-ZVA9k326Nwrj3Cj9jlh3wGFutC2ZornPNARZwsNYqQYgN0EsV2d53w5RN/co65Ohn4sUAUtb1rSUAOD6XN9idA==" + }, + "schema-utils": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } } } }, @@ -10303,9 +10579,9 @@ "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==" }, "p-limit": { - "version": "2.2.2", - "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.2.2.tgz", - "integrity": "sha512-WGR+xHecKTr7EbUEhyLSh5Dube9JtdiG78ufaeLxTgpudf/20KqyMioIUZJAezlTIi6evxuoUs9YXc11cU+yzQ==", + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-2.3.0.tgz", + "integrity": "sha512-//88mFWSJx8lxCzwdAABTJL2MyWB12+eIY7MDL2SqLmAkeKU9qxRvWuSyTjm3FUmpBEMuFfckAIqEaVGUDxb6w==", "requires": { "p-try": "^2.0.0" } @@ -10323,6 +10599,16 @@ "resolved": "https://registry.npmjs.org/p-try/-/p-try-2.2.0.tgz", "integrity": "sha512-R4nPAVTAU0B9D35/Gk3uJf/7XYbQcyohSKdvAxIRSNghFl4e71hVoGnBNQz9cWaXxO2I10KTC+3jMdvvoKw6dQ==" }, + "schema-utils": { + "version": "1.0.0", + "resolved": 
"https://registry.npmjs.org/schema-utils/-/schema-utils-1.0.0.tgz", + "integrity": "sha512-i27Mic4KovM/lnGsy8whRCHhc7VicJajAjTrYg11K9zfZXnYIt4k5F+kZkwjnrhKzLic/HLU4j11mjsz2G/75g==", + "requires": { + "ajv": "^6.1.0", + "ajv-errors": "^1.0.0", + "ajv-keywords": "^3.1.0" + } + }, "supports-color": { "version": "6.1.0", "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-6.1.0.tgz", @@ -10576,9 +10862,9 @@ "integrity": "sha1-tSQ9jz7BqjXxNkYFvA0QNuMKtp8=" }, "write-file-atomic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.1.tgz", - "integrity": "sha512-JPStrIyyVJ6oCSz/691fAjFtefZ6q+fP6tm+OS4Qw6o+TGQxNp1ziY2PgS+X/m0V8OWhZiO/m4xSj+Pr4RrZvw==", + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/write-file-atomic/-/write-file-atomic-3.0.3.tgz", + "integrity": "sha512-AvHcyZ5JnSfq3ioSyjrBkH9yW4m7Ayk8/9My/DD9onKeu/94fwrMocemO2QAJFAlnnDN+ZDS+ZjAR5ua1/PV/Q==", "requires": { "imurmurhash": "^0.1.4", "is-typedarray": "^1.0.0", diff --git a/docs/package.json b/docs/package.json index 8ce869057..477ae89fa 100644 --- a/docs/package.json +++ b/docs/package.json @@ -4,7 +4,8 @@ "description": "Welcome to the Tendermint Core documentation!", "main": "index.js", "dependencies": { - "vuepress-theme-cosmos": "^1.0.150" + "@vuepress/plugin-google-analytics": "^1.4.1", + "vuepress-theme-cosmos": "^1.0.161" }, "scripts": { "preserve": "./pre.sh", @@ -16,4 +17,4 @@ }, "author": "", "license": "ISC" -} \ No newline at end of file +} diff --git a/docs/tendermint-core/local_config.png b/docs/tendermint-core/local_config.png new file mode 100644 index 000000000..050a6df2f Binary files /dev/null and b/docs/tendermint-core/local_config.png differ diff --git a/docs/tendermint-core/sentry_layout.png b/docs/tendermint-core/sentry_layout.png new file mode 100644 index 000000000..240abde18 Binary files /dev/null and b/docs/tendermint-core/sentry_layout.png differ diff --git 
a/docs/tendermint-core/validators.md b/docs/tendermint-core/validators.md index 97a5da8ca..911405d58 100644 --- a/docs/tendermint-core/validators.md +++ b/docs/tendermint-core/validators.md @@ -22,9 +22,78 @@ Validators have a cryptographic key-pair and an associated amount of There are two ways to become validator. -1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) -2. The ABCI app responds to the EndBlock message with changes to the - existing validator set. +1. They can be pre-established in the [genesis state](./using-tendermint.md#genesis) +2. The ABCI app responds to the EndBlock message with changes to the + existing validator set. + +## Setting up a Validator + +When setting up a validator there are countless ways to configure your setup. This guide is aimed at showing one of them, the sentry node design. This design is mainly for DDOS prevention. + +### Network Layout + +![ALT Network Layout](./sentry_layout.png) + +The diagram is based on AWS, other cloud providers will have similar solutions to design a solution. Running nodes is not limited to cloud providers, you can run nodes on bare metal systems as well. The architecture will be the same no matter which setup you decide to go with. + +The proposed network diagram is similar to the classical backend/frontend separation of services in a corporate environment. The “backend” in this case is the private network of the validator in the data center. The data center network might involve multiple subnets, firewalls and redundancy devices, which is not detailed on this diagram. The important point is that the data center allows direct connectivity to the chosen cloud environment. Amazon AWS has “Direct Connect”, while Google Cloud has “Partner Interconnect”. This is a dedicated connection to the cloud provider (usually directly to your virtual private cloud instance in one of the regions). 
+ +All sentry nodes (the “frontend”) connect to the validator using this private connection. The validator does not have a public IP address to provide its services. + +Amazon has multiple availability zones within a region. One can install sentry nodes in other regions too. In this case the second, third and further regions need to have a private connection to the validator node. This can be achieved by VPC Peering (“VPC Network Peering” in Google Cloud). In this case, the second, third and further region sentry nodes will be directed to the first region and through the direct connect to the data center, arriving to the validator. + +A more persistent solution (not detailed on the diagram) is to have multiple direct connections to different regions from the data center. This way VPC Peering is not mandatory, although still beneficial for the sentry nodes. This overcomes the risk of depending on one region. It is more costly. + +### Local Configuration + +![ALT Local Configuration](./local_config.png) + +The validator will only talk to the sentry that are provided, the sentry nodes will communicate to the validator via a secret connection and the rest of the network through a normal connection. The sentry nodes do have the option of communicating with each other as well. + +When initializing nodes there are five parameters in the `config.toml` that may need to be altered. + +- `pex:` boolean. This turns the peer exchange reactor on or off for a node. When `pex=false`, only the `persistent_peers` list is available for connection. +- `persistent_peers:` a comma separated list of `nodeID@ip:port` values that define a list of peers that are expected to be online at all times. This is necessary at first startup because by setting `pex=false` the node will not be able to join the network. +- `unconditional_peer_ids:` comma separated list of nodeID's. These nodes will be connected to no matter the limits of inbound and outbound peers. 
This is useful for when sentry nodes have full address books. +- `private_peer_ids:` comma separated list of nodeID's. These nodes will not be gossiped to the network. This is an important field as you do not want your validator IP gossiped to the network. +- `addr_book_strict:` boolean. By default nodes with a routable address will be considered for connection. If this setting is turned off (false), non-routable IP addresses, like addresses in a private network can be added to the address book. + +#### Validator Node Configuration + +| Config Option | Setting | +| ---------------------- | -------------------------- | +| pex | false | +| persistent_peers | list of sentry nodes | +| private_peer_ids | none | +| unconditional_peer_ids | optionally sentry node IDs | +| addr_book_strict | false | + +The validator node should have `pex=false` so it does not gossip to the entire network. The persistent peers will be your sentry nodes. Private peers can be left empty as the validator is not trying to hide who it is communicating with. Setting unconditional peers is optional for a validator because they will not have a full address books. + +#### Sentry Node Configuration + +| Config Option | Setting | +| ---------------------- | --------------------------------------------- | +| pex | true | +| persistent_peers | validator node, optionally other sentry nodes | +| private_peer_ids | validator node ID | +| unconditional_peer_ids | validator node ID, optionally sentry node IDs | +| addr_book_strict | false | + +The sentry nodes should be able to talk to the entire network hence why `pex=true`. The persistent peers of a sentry node will be the validator, and optionally other sentry nodes. The sentry nodes should make sure that they do not gossip the validator's ip, to do this you must put the validators nodeID as a private peer. The unconditional peer IDs will be the validator ID and optionally other sentry nodes. 
+ +> Note: Do not forget to secure your node's firewalls when setting them up. + +More Information can be found at these links: + +- https://kb.certus.one/ +- https://forum.cosmos.network/t/sentry-node-architecture-overview/454 + +### Validator keys + +Protecting a validator's consensus key is the most important factor to take in when designing your setup. The key that a validator is given upon creation of the node is called a consensus key, it has to be online at all times in order to vote on blocks. It is **not recommended** to merely hold your private key in the default json file (`priv_validator_key.json`). Fortunately, the [Interchain Foundation](https://interchain.io/) has worked with a team to build a key management server for validators. You can find documentation on how to use it [here](https://github.com/iqlusioninc/tmkms), it is used extensively in production. You are not limited to using this tool, there are also [HSMs](https://safenet.gemalto.com/data-encryption/hardware-security-modules-hsms/), there is not a recommended HSM. + +Currently Tendermint uses [Ed25519](https://ed25519.cr.yp.to/) keys which are widely supported across the security sector and HSMs. ## Committing a Block diff --git a/docs/tools/README.md b/docs/tools/README.md index bf9dd1f97..86ba128f6 100644 --- a/docs/tools/README.md +++ b/docs/tools/README.md @@ -16,14 +16,14 @@ Tendermint has some tools that are associated with it for: ## Benchmarking -- https://github.com/interchainio/tm-load-test +- https://github.com/informalsystems/tm-load-test `tm-load-test` is a distributed load testing tool (and framework) for load testing Tendermint networks. ## Testnets -- https://github.com/interchainio/testnets +- https://github.com/informalsystems/testnets This repository contains various different configurations of test networks for, and relating to, Tendermint. 
diff --git a/dredd.yml b/dredd.yml index ba315bb90..40ba564f8 100644 --- a/dredd.yml +++ b/dredd.yml @@ -17,7 +17,7 @@ user: null inline-errors: false details: false method: [GET] -loglevel: warning +loglevel: debug path: [] hooks-worker-timeout: 5000 hooks-worker-connect-timeout: 1500 diff --git a/evidence/codec.go b/evidence/codec.go index 135341068..650a34607 100644 --- a/evidence/codec.go +++ b/evidence/codec.go @@ -2,6 +2,7 @@ package evidence import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" "github.com/tendermint/tendermint/types" ) diff --git a/evidence/errors.go b/evidence/errors.go new file mode 100644 index 000000000..7bad19c81 --- /dev/null +++ b/evidence/errors.go @@ -0,0 +1,21 @@ +package evidence + +import ( + "fmt" +) + +// ErrInvalidEvidence returns when evidence failed to validate +type ErrInvalidEvidence struct { + Reason error +} + +func (e ErrInvalidEvidence) Error() string { + return fmt.Sprintf("evidence is not valid: %v ", e.Reason) +} + +// ErrEvidenceAlreadyStored indicates that the evidence has already been stored in the evidence db +type ErrEvidenceAlreadyStored struct{} + +func (e ErrEvidenceAlreadyStored) Error() string { + return "evidence is already stored" +} diff --git a/evidence/pool.go b/evidence/pool.go index 62b0a3325..75ac7f900 100644 --- a/evidence/pool.go +++ b/evidence/pool.go @@ -5,10 +5,10 @@ import ( "sync" "time" - clist "github.com/tendermint/tendermint/libs/clist" - "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" + clist "github.com/tendermint/tendermint/libs/clist" + "github.com/tendermint/tendermint/libs/log" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" ) @@ -95,25 +95,29 @@ func (evpool *Pool) Update(block *types.Block, state sm.State) { } // AddEvidence checks the evidence is valid and adds it to the pool. 
-func (evpool *Pool) AddEvidence(evidence types.Evidence) (err error) { +func (evpool *Pool) AddEvidence(evidence types.Evidence) error { - // TODO: check if we already have evidence for this - // validator at this height so we dont get spammed + // check if evidence is already stored + if evpool.store.Has(evidence) { + return ErrEvidenceAlreadyStored{} + } if err := sm.VerifyEvidence(evpool.stateDB, evpool.State(), evidence); err != nil { - return err + return ErrInvalidEvidence{err} } // fetch the validator and return its voting power as its priority // TODO: something better ? - valset, _ := sm.LoadValidators(evpool.stateDB, evidence.Height()) - _, val := valset.GetByAddress(evidence.Address()) - priority := val.VotingPower - - added := evpool.store.AddNewEvidence(evidence, priority) - if !added { - // evidence already known, just ignore - return + valSet, err := sm.LoadValidators(evpool.stateDB, evidence.Height()) + if err != nil { + return err + } + _, val := valSet.GetByAddress(evidence.Address()) + priority := val.StakingPower + + _, err = evpool.store.AddNewEvidence(evidence, priority) + if err != nil { + return err } evpool.logger.Info("Verified new evidence of byzantine behaviour", "evidence", evidence) @@ -159,8 +163,7 @@ func (evpool *Pool) removeEvidence( // Remove the evidence if it's already in a block or if it's now too old. 
if _, ok := blockEvidenceMap[evMapKey(ev)]; ok || - ageNumBlocks > params.MaxAgeNumBlocks || - ageDuration > params.MaxAgeDuration { + (ageDuration > params.MaxAgeDuration && ageNumBlocks > params.MaxAgeNumBlocks) { // remove from clist evpool.evidenceList.Remove(e) e.DetachPrev() diff --git a/evidence/pool_test.go b/evidence/pool_test.go index 7224f9d17..a304e3aca 100644 --- a/evidence/pool_test.go +++ b/evidence/pool_test.go @@ -8,10 +8,11 @@ import ( "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" - dbm "github.com/tendermint/tm-db" ) func TestMain(m *testing.M) { @@ -26,7 +27,7 @@ func initializeValidatorState(valAddr []byte, height int64) dbm.DB { // create validator set and state vals := []*types.Validator{ - {Address: valAddr, VotingPower: 1}, + {Address: valAddr, StakingPower: 1}, } state := sm.State{ LastBlockHeight: 0, @@ -55,7 +56,7 @@ func TestEvidencePool(t *testing.T) { var ( valAddr = []byte("val1") - height = int64(5) + height = int64(100002) stateDB = initializeValidatorState(valAddr, height) evidenceDB = dbm.NewMemDB() pool = NewPool(stateDB, evidenceDB) @@ -63,11 +64,11 @@ func TestEvidencePool(t *testing.T) { ) goodEvidence := types.NewMockEvidence(height, time.Now(), 0, valAddr) - badEvidence := types.NewMockEvidence(height, evidenceTime, 0, valAddr) + badEvidence := types.NewMockEvidence(1, evidenceTime, 0, valAddr) // bad evidence err := pool.AddEvidence(badEvidence) - assert.NotNil(t, err) + assert.Error(t, err) // err: evidence created at 2019-01-01 00:00:00 +0000 UTC has expired. Evidence can not be older than: ... 
var wg sync.WaitGroup @@ -78,14 +79,14 @@ func TestEvidencePool(t *testing.T) { }() err = pool.AddEvidence(goodEvidence) - assert.Nil(t, err) + assert.NoError(t, err) wg.Wait() assert.Equal(t, 1, pool.evidenceList.Len()) - // if we send it again, it shouldnt change the size + // if we send it again, it shouldnt add and return an error err = pool.AddEvidence(goodEvidence) - assert.Nil(t, err) + assert.Error(t, err) assert.Equal(t, 1, pool.evidenceList.Len()) } @@ -131,10 +132,10 @@ func TestAddEvidence(t *testing.T) { evDescription string }{ {height, time.Now(), false, "valid evidence"}, - {height, evidenceTime, true, "evidence created at 2019-01-01 00:00:00 +0000 UTC has expired"}, - {int64(1), time.Now(), true, "evidence from height 1 is too old"}, + {height, evidenceTime, false, "valid evidence (despite old time)"}, + {int64(1), time.Now(), false, "valid evidence (despite old height)"}, {int64(1), evidenceTime, true, - "evidence from height 1 is too old & evidence created at 2019-01-01 00:00:00 +0000 UTC has expired"}, + "evidence from height 1 (created at: 2019-01-01 00:00:00 +0000 UTC) is too old"}, } for _, tc := range testCases { diff --git a/evidence/reactor.go b/evidence/reactor.go index e4dbd51ad..26343638a 100644 --- a/evidence/reactor.go +++ b/evidence/reactor.go @@ -82,10 +82,18 @@ func (evR *Reactor) Receive(chID byte, src p2p.Peer, msgBytes []byte) { case *ListMessage: for _, ev := range msg.Evidence { err := evR.evpool.AddEvidence(ev) - if err != nil { - evR.Logger.Info("Evidence is not valid", "evidence", msg.Evidence, "err", err) + switch err.(type) { + case ErrInvalidEvidence: + evR.Logger.Error("Evidence is not valid", "evidence", msg.Evidence, "err", err) // punish peer evR.Switch.StopPeerForError(src, err) + return + case ErrEvidenceAlreadyStored: + evR.Logger.Debug("Evidence already exists", "evidence", msg.Evidence) + case nil: + default: + evR.Logger.Error("Evidence has not been added", "evidence", msg.Evidence, "err", err) + return } } 
default: @@ -186,7 +194,7 @@ func (evR Reactor) checkSendEvidenceMessage( if peerHeight < evHeight { // peer is behind. sleep while he catches up return nil, true - } else if ageNumBlocks > params.MaxAgeNumBlocks || + } else if ageNumBlocks > params.MaxAgeNumBlocks && ageDuration > params.MaxAgeDuration { // evidence is too old, skip // NOTE: if evidence is too old for an honest peer, then we're behind and diff --git a/evidence/reactor_test.go b/evidence/reactor_test.go index b013b7715..135c191da 100644 --- a/evidence/reactor_test.go +++ b/evidence/reactor_test.go @@ -9,12 +9,13 @@ import ( "github.com/go-kit/kit/log/term" "github.com/stretchr/testify/assert" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/p2p" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // evidenceLogger is a TestingLogger which uses a different diff --git a/evidence/store.go b/evidence/store.go index 3547b5ffc..f01e9de5f 100644 --- a/evidence/store.go +++ b/evidence/store.go @@ -3,8 +3,9 @@ package evidence import ( "fmt" - "github.com/tendermint/tendermint/types" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/types" ) /* @@ -139,16 +140,22 @@ func (store *Store) GetInfo(height int64, hash []byte) Info { return ei } +// Has checks if the evidence is already stored +func (store *Store) Has(evidence types.Evidence) bool { + key := keyLookup(evidence) + ok, _ := store.db.Has(key) + return ok +} + // AddNewEvidence adds the given evidence to the database. // It returns false if the evidence is already stored. 
-func (store *Store) AddNewEvidence(evidence types.Evidence, priority int64) bool { +func (store *Store) AddNewEvidence(evidence types.Evidence, priority int64) (bool, error) { // check if we already have seen it - ei := store.getInfo(evidence) - if ei.Evidence != nil { - return false + if store.Has(evidence) { + return false, nil } - ei = Info{ + ei := Info{ Committed: false, Priority: priority, Evidence: evidence, @@ -156,16 +163,23 @@ func (store *Store) AddNewEvidence(evidence types.Evidence, priority int64) bool eiBytes := cdc.MustMarshalBinaryBare(ei) // add it to the store + var err error key := keyOutqueue(evidence, priority) - store.db.Set(key, eiBytes) + if err = store.db.Set(key, eiBytes); err != nil { + return false, err + } key = keyPending(evidence) - store.db.Set(key, eiBytes) + if err = store.db.Set(key, eiBytes); err != nil { + return false, err + } key = keyLookup(evidence) - store.db.SetSync(key, eiBytes) + if err = store.db.SetSync(key, eiBytes); err != nil { + return false, err + } - return true + return true, nil } // MarkEvidenceAsBroadcasted removes evidence from Outqueue. 
diff --git a/evidence/store_test.go b/evidence/store_test.go index b85a6437b..1d45f09a1 100644 --- a/evidence/store_test.go +++ b/evidence/store_test.go @@ -5,8 +5,10 @@ import ( "time" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/types" + "github.com/stretchr/testify/require" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/types" ) //------------------------------------------- @@ -20,11 +22,13 @@ func TestStoreAddDuplicate(t *testing.T) { priority := int64(10) ev := types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")) - added := store.AddNewEvidence(ev, priority) + added, err := store.AddNewEvidence(ev, priority) + require.NoError(t, err) assert.True(added) // cant add twice - added = store.AddNewEvidence(ev, priority) + added, err = store.AddNewEvidence(ev, priority) + require.NoError(t, err) assert.False(added) } @@ -39,7 +43,8 @@ func TestStoreCommitDuplicate(t *testing.T) { store.MarkEvidenceAsCommitted(ev) - added := store.AddNewEvidence(ev, priority) + added, err := store.AddNewEvidence(ev, priority) + require.NoError(t, err) assert.False(added) } @@ -58,7 +63,8 @@ func TestStoreMark(t *testing.T) { priority := int64(10) ev := types.NewMockEvidence(2, time.Now().UTC(), 1, []byte("val1")) - added := store.AddNewEvidence(ev, priority) + added, err := store.AddNewEvidence(ev, priority) + require.NoError(t, err) assert.True(added) // get the evidence. verify. 
should be uncommitted @@ -115,7 +121,8 @@ func TestStorePriority(t *testing.T) { } for _, c := range cases { - added := store.AddNewEvidence(c.ev, c.priority) + added, err := store.AddNewEvidence(c.ev, c.priority) + require.NoError(t, err) assert.True(added) } diff --git a/go.mod b/go.mod index dfeee6219..aadb452fd 100644 --- a/go.mod +++ b/go.mod @@ -3,33 +3,37 @@ module github.com/tendermint/tendermint go 1.13 require ( - github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f + github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d github.com/Workiva/go-datastructures v1.0.52 - github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d - github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a + github.com/btcsuite/btcd v0.20.1-beta + github.com/btcsuite/btcutil v1.0.2 github.com/coniks-sys/coniks-go v0.0.0-20180722014011-11acf4819b71 + github.com/datastream/go-fn v0.0.0-20130403065544-37331e464987 // indirect + github.com/datastream/probab v0.0.0-20150902151906-d47400db423d github.com/fortytw2/leaktest v1.3.0 github.com/go-kit/kit v0.10.0 github.com/go-logfmt/logfmt v0.5.0 github.com/gogo/protobuf v1.3.1 - github.com/golang/protobuf v1.3.4 - github.com/gorilla/websocket v1.4.1 - github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f + github.com/golang/protobuf v1.4.0 + github.com/gorilla/websocket v1.4.2 + github.com/gtank/merlin v0.1.1 github.com/libp2p/go-buffer-pool v0.0.2 github.com/magiconair/properties v1.8.1 github.com/pkg/errors v0.9.1 - github.com/prometheus/client_golang v1.5.0 + github.com/prometheus/client_golang v1.5.1 github.com/r2ishiguro/vrf v0.0.0-20180716233122-192de52975eb - github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a + github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 github.com/rs/cors v1.7.0 + github.com/skelterjohn/go.matrix v0.0.0-20130517144113-daa59528eefd // 
indirect github.com/snikch/goodman v0.0.0-20171125024755-10e37e294daa - github.com/spf13/cobra v0.0.6 - github.com/spf13/viper v1.6.2 + github.com/spf13/cobra v1.0.0 + github.com/spf13/viper v1.6.3 github.com/stretchr/testify v1.5.1 github.com/tendermint/go-amino v0.14.1 - github.com/tendermint/tm-db v0.4.1 + github.com/tendermint/tm-db v0.5.1 github.com/yahoo/coname v0.0.0-20170609175141-84592ddf8673 // indirect - golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 - golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914 - google.golang.org/grpc v1.27.1 + golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 + golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e + google.golang.org/grpc v1.28.1 + gopkg.in/yaml.v3 v3.0.0-20200506231410-2ff61e1afc86 ) diff --git a/go.sum b/go.sum index 5a64c19de..80eda7433 100644 --- a/go.sum +++ b/go.sum @@ -2,8 +2,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMT cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f h1:4O1om+UVU+Hfcihr1timk8YNXHxzZWgCo7ofnrZRApw= -github.com/ChainSafe/go-schnorrkel v0.0.0-20200102211924-4bcbc698314f/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d h1:nalkkPQcITbvhmL4+C4cKA87NW0tfm3Kl9VXRoPywFg= +github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d/go.mod h1:URdX5+vg25ts3aCh8H5IFZybJYKWhJHYMTnf+ULtoC4= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= github.com/Shopify/sarama 
v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= @@ -33,11 +33,12 @@ github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+Ce github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d h1:xG8Pj6Y6J760xwETNmMzmlt38QSwz0BLp1cZ09g27uw= -github.com/btcsuite/btcd v0.0.0-20190115013929-ed77733ec07d/go.mod h1:d3C0AkH6BRcvO8T0UEPu53cnw4IbV63x1bEjildYhO0= +github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw= +github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= -github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a h1:RQMUrEILyYJEoAT34XS/kLu40vC0+po/UfxrBBA4qZE= -github.com/btcsuite/btcutil v0.0.0-20180706230648-ab6388e0c60a/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= +github.com/btcsuite/btcutil v1.0.2 h1:9iZ1Terx9fMIOtq1VrwdqfsATL9MC2l8ZrUY6YZ2uts= +github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc= @@ -52,12 +53,14 @@ github.com/cespare/xxhash/v2 v2.1.1 h1:6MnRN8NT7+YBpUIWxHtefFZOKTAPgGjpQSxqLNn0+ 
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/coniks-sys/coniks-go v0.0.0-20180722014011-11acf4819b71 h1:MFLTqgfJclmtaQ1SRUrWwmDX/1UBok3XWUethkJ2swQ= github.com/coniks-sys/coniks-go v0.0.0-20180722014011-11acf4819b71/go.mod h1:TrHYHH4Wze7v7Hkwu1MH1W+mCPQKM+gs+PicdEV14o8= github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= @@ -68,6 +71,10 @@ github.com/cosmos/go-bip39 v0.0.0-20180819234021-555e2067c45d/go.mod h1:tSxLoYXy github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/datastream/go-fn 
v0.0.0-20130403065544-37331e464987 h1:S3JwKvmPJITKLLH7r3WYbBLjXu4lEZU9gFBLj01zaNU= +github.com/datastream/go-fn v0.0.0-20130403065544-37331e464987/go.mod h1:bJl2ftsgvIWNGkufh7xMrXAATJUEdner7/2wCuHmVLI= +github.com/datastream/probab v0.0.0-20150902151906-d47400db423d h1:wnkyVc4CQO5XlqF4RW4+y9qN05xY/frZn0IiT4Gi0qc= +github.com/datastream/probab v0.0.0-20150902151906-d47400db423d/go.mod h1:qktd+m4xKlvhKU9bN9YipjWd79+vYDYb+N85egJl/NM= github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= @@ -80,10 +87,10 @@ github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1 github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/bbolt v1.3.3 h1:gSJmxrs37LgTqR/oyJBWok6k6SvXEUerFTbltIhXkBM= -github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51 h1:0JZ+dUmQeA8IIVUMzysrX4/AKuQwWhV2dYQuPZdvdSQ= github.com/facebookgo/ensure v0.0.0-20160127193407-b4ab57deab51/go.mod h1:Yg+htXGokKKdzcwhuNDwVvN+uBxDGXJ7G/VN1d8fa64= 
github.com/facebookgo/stack v0.0.0-20160209184415-751773369052 h1:JWuenKqqX8nojtoVVWjGfOF9635RETekkoH6Cc9SX0A= @@ -115,7 +122,6 @@ github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= github.com/gogo/protobuf v1.3.1 h1:DqDEcV5aeaTmdFBePNpYsp3FlcVH/2ISVVM9Qf8PSls= github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -124,12 +130,18 @@ github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfb github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4 h1:87PNWwrRvUSnqS4dlcBU/ftvOIBep4sYuBLlh6rX2wk= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf 
v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0 h1:oOuy+ugB+P/kBdUnG5QaMXSIyJ1q38wWSojYCb3z5VQ= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/golang/snappy v0.0.1 h1:Qgr9rKW7uDUkrbSmQeiDsGa8SjGyCOGtuasMWwvp2P4= github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -147,15 +159,16 @@ github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2z github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1 h1:q7AeDBpnBk8AogcD4DSag/Ukw/KV+YhzLj2bP5HvKCM= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.4.2 h1:+/TMaTYc4QFitKJxsQ7Yye35DkWvkdLcvGKqM+x0Ufc= +github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware 
v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f h1:8N8XWLZelZNibkhM1FuF+3Ad3YIbgirjdMiVA0eUkaM= github.com/gtank/merlin v0.1.1-0.20191105220539-8318aed1a79f/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= +github.com/gtank/merlin v0.1.1 h1:eQ90iG7K9pOhtereWsmyRJ6RAwcP4tHTDBHXNg+u5is= +github.com/gtank/merlin v0.1.1/go.mod h1:T86dnYJhcGOh5BjZFCJWTDeTK7XW8uE+E21Cy/bIQ+s= github.com/gtank/ristretto255 v0.1.2 h1:JEqUCPA1NvLq5DwYtuzigd7ss8fwbYay9fi4/5uMzcc= github.com/gtank/ristretto255 v0.1.2/go.mod h1:Ph5OpO6c7xKUGROZfWVLiJf9icMDwUeIvY4OmlYW69o= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= @@ -287,8 +300,8 @@ github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.5.0 h1:Ctq0iGpCmr3jeP77kbF2UxgvRwzWWz+4Bh9/vJTyg1A= -github.com/prometheus/client_golang v1.5.0/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= +github.com/prometheus/client_golang v1.5.1 h1:bdHYieyGlH+6OLEk2YQha8THib30KP0/yD0YH9m6xcA= +github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod 
h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -312,8 +325,9 @@ github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+Gx github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= github.com/r2ishiguro/vrf v0.0.0-20180716233122-192de52975eb h1:3kW8n+FfBaUoqlHxCa6e90PXWpGCWWkdyTZ6F7c9m2I= github.com/r2ishiguro/vrf v0.0.0-20180716233122-192de52975eb/go.mod h1:2NzHJUkr/ERaPNQ2IUuNbB2jMTWYp2DxhcraWbzZj00= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a h1:9ZKAASQSHhDYGoxY8uLVpewe1GDZ2vu2Tr/vTdVAkFQ= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0 h1:MkV+77GLUNo5oJ0jf870itWm3D0Sjh7+Za9gazKc5LQ= +github.com/rcrowley/go-metrics v0.0.0-20200313005456-10cdbea86bc0/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik= @@ -325,6 +339,8 @@ github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/skelterjohn/go.matrix v0.0.0-20130517144113-daa59528eefd 
h1:+ZLYzP9SYC3WU9buyb9H0l9DQxqVFOCkDG8QnNBMAlA= +github.com/skelterjohn/go.matrix v0.0.0-20130517144113-daa59528eefd/go.mod h1:x7ui0Rh4QxcWEOgIfa3cr9q4W/wyLTDdzISxBmLVeX8= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d h1:zE9ykElWQ6/NYmHa3jpm/yHnI4xSofP+UP6SpjHcSeM= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= github.com/smartystreets/goconvey v1.6.4 h1:fv0U8FUIMPNf1L9lnHLvLhgicrIVChEkdzIKYqbNC9s= @@ -339,16 +355,16 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= -github.com/spf13/cobra v0.0.6 h1:breEStsVwemnKh2/s6gMvSdMEkwW0sK8vGStnlVBMCs= -github.com/spf13/cobra v0.0.6/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= +github.com/spf13/cobra v1.0.0 h1:6m/oheQuQ13N9ks4hubMG6BnvwOeaJrqSPLahSnczz8= +github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.6.2 h1:7aKfF+e8/k68gda3LOjo5RxiUqddoFxVq4BKBPrxk5E= -github.com/spf13/viper v1.6.2/go.mod h1:t3iDnF5Jlj76alVNuyFBk5oUMCvsrkbvZK0WQdfDi5k= +github.com/spf13/viper v1.6.3 
h1:pDDu1OyEDTKzpJwdq4TiuLyMsUgRa/BT5cn5O62NoHs= +github.com/spf13/viper v1.6.3/go.mod h1:jUMtyi0/lB5yZH/FjyGAoH7IMNrIhlBf6pXZmbMDvzw= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/amqp v0.0.0-20190827072141-edfb9018d271/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= github.com/streadway/handy v0.0.0-20190108123426-d5acb3125c2a/go.mod h1:qNTQ5P5JnDBl6z3cMAg/SywNDC5ABu5ApDIw6lUbRmI= @@ -367,8 +383,8 @@ github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c h1:g+WoO5jjkqGAzH github.com/tecbot/gorocksdb v0.0.0-20191217155057-f0fad39f321c/go.mod h1:ahpPrc7HpcfEWDQRZEmnXMzHY03mLDYMCxeDzy46i+8= github.com/tendermint/go-amino v0.14.1 h1:o2WudxNfdLNBwMyl2dqOJxiro5rfrEaU0Ugs6offJMk= github.com/tendermint/go-amino v0.14.1/go.mod h1:i/UKE5Uocn+argJJBb12qTZsCDBcAYMbR92AaJVmKso= -github.com/tendermint/tm-db v0.4.1 h1:TvX7JWjJOVZ+N3y+I86wddrGttOdMmmBxXcu0/Y7ZJ0= -github.com/tendermint/tm-db v0.4.1/go.mod h1:JsJ6qzYkCGiGwm5GHl/H5GLI9XLb6qZX7PRe425dHAY= +github.com/tendermint/tm-db v0.5.1 h1:H9HDq8UEA7Eeg13kdYckkgwwkQLBnJGgX4PgLJRhieY= +github.com/tendermint/tm-db v0.5.1/go.mod h1:g92zWjHpCYlEvQXvy9M168Su8V1IBEeawpXVVBaK4f4= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= @@ -378,6 +394,7 @@ github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/yahoo/coname v0.0.0-20170609175141-84592ddf8673 h1:PSg2cEFd+9Ae/r5x5iO8cJ3VmTbZNQp6X8tHDmVJAbA= github.com/yahoo/coname 
v0.0.0-20170609175141-84592ddf8673/go.mod h1:Wq2sZrP++Us4tAw1h58MHS8BGIpC4NmKHfvw2QWBe9U= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= go.etcd.io/bbolt v1.3.3 h1:MUGmc65QhB3pIlaQ5bB4LwqSj6GIonVJXpZiaKNyaKk= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= @@ -400,8 +417,10 @@ golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACk golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413 h1:ULYEB3JvPRE/IfO+9uO7vKV/xzVTO7XPAwm8xbf4w2g= golang.org/x/crypto v0.0.0-20191206172530-e9b2fee46413/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200406173513-056763e48d71 h1:DOmugCavvUtnUD114C1Wh+UgTgQZ4pMLzXxi1pSt+/Y= +golang.org/x/crypto v0.0.0-20200406173513-056763e48d71/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= @@ -410,6 +429,8 @@ golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod 
h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -428,8 +449,9 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914 h1:MlY3mEfbnWGmUi4rtHOtNnnnN4UJRGSyLPx+DXA5Sq4= -golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -453,8 +475,9 @@ golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod 
h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200122134326-e047566fdf82 h1:ywK/j/KkyTHcdyYSZNXGjMwgmDSfjglYZ3vStQ/gSCU= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= @@ -474,7 +497,10 @@ golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBn golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114 h1:DnSr2mCsxyCE6ZgIkmcWUQY2R5cH/6wL7eIxEmQOMSE= golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200519205726-57a9e4404bf7/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors 
v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543 h1:E7g+9GITq07hpfrRu66IVDexMakfv52eLZ2CXBWiKr4= @@ -497,9 +523,17 @@ google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= -google.golang.org/grpc v1.27.1 h1:zvIju4sqAGvwKspUQOhwnpcqSbzi7/H6QomNNjTL4sk= -google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.28.1 h1:C1QC6KzgSiLyBabDi87BbjaGreoRgGUF5nOyvfrAZ1k= +google.golang.org/grpc v1.28.1/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0 h1:qdOKuR/EIArgaWNjetjgTzgVTAZ+S/WXVrq9HW9zimw= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= @@ -522,6 +556,8 @@ gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.5 h1:ymVxjfMaHvXD8RqPRmzHHsB3VvucivSkIAvJFDI5O3c= gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v3 v3.0.0-20200506231410-2ff61e1afc86 h1:OfFoIUYv/me30yv7XlMy4F9RJw8DEm8WQ6QG1Ph4bH0= +gopkg.in/yaml.v3 v3.0.0-20200506231410-2ff61e1afc86/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/libs/cli/helper.go b/libs/cli/helper.go index 6bf23750c..a5014c16c 100644 --- a/libs/cli/helper.go +++ b/libs/cli/helper.go @@ -7,6 +7,8 @@ import ( "io/ioutil" "os" "path/filepath" + + "github.com/spf13/cobra" ) // WriteConfigVals writes a toml file with the given values. @@ -85,3 +87,42 @@ func RunCaptureWithArgs(cmd Executable, args []string, env map[string]string) (s stderr = <-*errC return stdout, stderr, err } + +// NewCompletionCmd returns a cobra.Command that generates bash and zsh +// completion scripts for the given root command. If hidden is true, the +// command will not show up in the root command's list of available commands. +func NewCompletionCmd(rootCmd *cobra.Command, hidden bool) *cobra.Command { + flagZsh := "zsh" + cmd := &cobra.Command{ + Use: "completion", + Short: "Generate shell completion scripts", + Long: fmt.Sprintf(`Generate Bash and Zsh completion scripts and print them to STDOUT. 
+ +Once saved to file, a completion script can be loaded in the shell's +current session as shown: + + $ . <(%s completion) + +To configure your bash shell to load completions for each session add to +your $HOME/.bashrc or $HOME/.profile the following instruction: + + . <(%s completion) +`, rootCmd.Use, rootCmd.Use), + RunE: func(cmd *cobra.Command, _ []string) error { + zsh, err := cmd.Flags().GetBool(flagZsh) + if err != nil { + return err + } + if zsh { + return rootCmd.GenZshCompletion(cmd.OutOrStdout()) + } + return rootCmd.GenBashCompletion(cmd.OutOrStdout()) + }, + Hidden: hidden, + Args: cobra.NoArgs, + } + + cmd.Flags().Bool(flagZsh, false, "Generate Zsh completion script") + + return cmd +} diff --git a/libs/clist/clist_test.go b/libs/clist/clist_test.go index 1e25946ac..14b7e37c0 100644 --- a/libs/clist/clist_test.go +++ b/libs/clist/clist_test.go @@ -8,6 +8,7 @@ import ( "time" "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/libs/kv/kvpair.go b/libs/kv/kvpair.go index 8eebae606..2474b2e47 100644 --- a/libs/kv/kvpair.go +++ b/libs/kv/kvpair.go @@ -35,4 +35,3 @@ func (kvs Pairs) Less(i, j int) bool { } func (kvs Pairs) Swap(i, j int) { kvs[i], kvs[j] = kvs[j], kvs[i] } func (kvs Pairs) Sort() { sort.Sort(kvs) } - diff --git a/libs/log/tmfmt_logger_test.go b/libs/log/tmfmt_logger_test.go index d6f039ce4..86d0e5693 100644 --- a/libs/log/tmfmt_logger_test.go +++ b/libs/log/tmfmt_logger_test.go @@ -10,6 +10,7 @@ import ( kitlog "github.com/go-kit/kit/log" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/libs/log/tracing_logger_test.go b/libs/log/tracing_logger_test.go index b40d2b9e0..354476755 100644 --- a/libs/log/tracing_logger_test.go +++ b/libs/log/tracing_logger_test.go @@ -8,6 +8,7 @@ import ( "testing" "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/libs/pubsub/pubsub.go 
b/libs/pubsub/pubsub.go index 325403cd8..2a89e7591 100644 --- a/libs/pubsub/pubsub.go +++ b/libs/pubsub/pubsub.go @@ -39,6 +39,7 @@ import ( "sync" "github.com/pkg/errors" + "github.com/tendermint/tendermint/libs/service" ) diff --git a/libs/rand/sampling.go b/libs/rand/sampling.go index 8ba7b4518..2f4000259 100644 --- a/libs/rand/sampling.go +++ b/libs/rand/sampling.go @@ -2,6 +2,8 @@ package rand import ( "fmt" + "math" + "math/big" s "sort" ) @@ -9,10 +11,9 @@ import ( type Candidate interface { Priority() uint64 LessThan(other Candidate) bool + SetWinPoint(winPoint int64) } -const uint64Mask = uint64(0x7FFFFFFFFFFFFFFF) - // Select a specified number of candidates randomly from the candidate set based on each priority. This function is // deterministic and will produce the same result for the same input. // @@ -32,7 +33,7 @@ func RandomSamplingWithPriority( thresholds := make([]uint64, sampleSize) for i := 0; i < sampleSize; i++ { // calculating [gross weights] × [(0,1] random number] - thresholds[i] = uint64(float64(nextRandom(&seed)&uint64Mask) / float64(uint64Mask+1) * float64(totalPriority)) + thresholds[i] = randomThreshold(&seed, totalPriority) } s.Slice(thresholds, func(i, j int) bool { return thresholds[i] < thresholds[j] }) @@ -65,6 +66,105 @@ func RandomSamplingWithPriority( totalPriority, actualTotalPriority, seed, sampleSize, undrawn, undrawn, thresholds[undrawn], len(candidates))) } +func moveWinnerToLast(candidates []Candidate, winner int) { + winnerCandidate := candidates[winner] + copy(candidates[winner:], candidates[winner+1:]) + candidates[len(candidates)-1] = winnerCandidate +} + +const uint64Mask = uint64(0x7FFFFFFFFFFFFFFF) + +var divider *big.Int + +func init() { + divider = big.NewInt(int64(uint64Mask)) + divider.Add(divider, big.NewInt(1)) +} + +func randomThreshold(seed *uint64, total uint64) uint64 { + if int64(total) < 0 { + panic(fmt.Sprintf("total priority is overflow: %d", total)) + } + totalBig := big.NewInt(int64(total)) + 
a := big.NewInt(int64(nextRandom(seed) & uint64Mask)) + a.Mul(a, totalBig) + a.Div(a, divider) + return a.Uint64() +} + +// `RandomSamplingWithoutReplacement` elects winners among candidates without replacement +// so it updates rewards of winners. This function continues to elect winners until the both of two +// conditions(minSamplingCount, minPriorityPercent) are met. +func RandomSamplingWithoutReplacement( + seed uint64, candidates []Candidate, minSamplingCount int) (winners []Candidate) { + + if len(candidates) < minSamplingCount { + panic(fmt.Sprintf("The number of candidates(%d) cannot be less minSamplingCount %d", + len(candidates), minSamplingCount)) + } + + totalPriority := sumTotalPriority(candidates) + candidates = sort(candidates) + winnersPriority := uint64(0) + losersPriorities := make([]uint64, len(candidates)) + winnerNum := 0 + for winnerNum < minSamplingCount { + if totalPriority-winnersPriority == 0 { + // it's possible if some candidates have zero priority + // if then, we can't elect voter any more; we should holt electing not to fall in infinity loop + break + } + threshold := randomThreshold(&seed, totalPriority-winnersPriority) + cumulativePriority := uint64(0) + found := false + for i, candidate := range candidates[:len(candidates)-winnerNum] { + if threshold < cumulativePriority+candidate.Priority() { + moveWinnerToLast(candidates, i) + winnersPriority += candidate.Priority() + losersPriorities[winnerNum] = totalPriority - winnersPriority + winnerNum++ + found = true + break + } + cumulativePriority += candidate.Priority() + } + + if !found { + panic(fmt.Sprintf("Cannot find random sample. 
winnerNum=%d, minSamplingCount=%d, "+ + "winnersPriority=%d, totalPriority=%d, threshold=%d", + winnerNum, minSamplingCount, winnersPriority, totalPriority, threshold)) + } + } + compensationProportions := make([]float64, winnerNum) + for i := winnerNum - 2; i >= 0; i-- { // last winner doesn't get compensation reward + compensationProportions[i] = compensationProportions[i+1] + 1/float64(losersPriorities[i]) + } + winners = candidates[len(candidates)-winnerNum:] + winPoints := make([]float64, len(winners)) + totalWinPoint := float64(0) + for i, winner := range winners { + winPoints[i] = 1 + float64(winner.Priority())*compensationProportions[i] + totalWinPoint += winPoints[i] + } + for i, winner := range winners { + if winPoints[i] > math.MaxInt64 || winPoints[i] < 0 { + panic(fmt.Sprintf("winPoint is invalid: %f", winPoints[i])) + } + winner.SetWinPoint(int64(float64(totalPriority) * winPoints[i] / totalWinPoint)) + } + return winners +} + +func sumTotalPriority(candidates []Candidate) (sum uint64) { + for _, candi := range candidates { + sum += candi.Priority() + } + if sum == 0 { + panic("all candidates have zero priority") + } + return +} + // SplitMix64 // http://xoshiro.di.unimi.it/splitmix64.c // diff --git a/libs/rand/sampling_test.go b/libs/rand/sampling_test.go index b090f75ab..1783b5801 100644 --- a/libs/rand/sampling_test.go +++ b/libs/rand/sampling_test.go @@ -3,17 +3,21 @@ package rand import ( "fmt" "math" + "math/rand" s "sort" "testing" + + "github.com/stretchr/testify/assert" ) type Element struct { - ID uint32 - Weight uint64 + id uint32 + winPoint int64 + weight uint64 } func (e *Element) Priority() uint64 { - return e.Weight + return e.weight } func (e *Element) LessThan(other Candidate) bool { @@ -21,7 +25,11 @@ func (e *Element) LessThan(other Candidate) bool { if !ok { panic("incompatible type") } - return e.ID < o.ID + return e.id < o.id +} + +func (e *Element) SetWinPoint(winPoint int64) { + e.winPoint += winPoint } func 
TestRandomSamplingWithPriority(t *testing.T) { @@ -47,7 +55,7 @@ func TestRandomSamplingWithPriority(t *testing.T) { for i := 0; i < 100000; i++ { elected = RandomSamplingWithPriority(uint64(i), candidates, 10, uint64(len(candidates))) for _, e := range elected { - counts[e.(*Element).ID]++ + counts[e.(*Element).id]++ } } expected := float64(1) / float64(100) @@ -84,10 +92,303 @@ func TestRandomSamplingPanicCase(t *testing.T) { } } +func resetWinPoint(candidate []Candidate) { + for _, c := range candidate { + c.(*Element).winPoint = 0 + } +} + +func TestRandomSamplingWithoutReplacement1Candidate(t *testing.T) { + candidates := newCandidates(1, func(i int) uint64 { return uint64(1000 * (i + 1)) }) + + winners := RandomSamplingWithoutReplacement(0, candidates, 1) + assert.True(t, len(winners) == 1) + assert.True(t, candidates[0] == winners[0]) + assert.True(t, winners[0].(*Element).winPoint == 1000) + resetWinPoint(candidates) + + winners2 := RandomSamplingWithoutReplacement(0, candidates, 0) + assert.True(t, len(winners2) == 0) + resetWinPoint(candidates) + + winners4 := RandomSamplingWithoutReplacement(0, candidates, 0) + assert.True(t, len(winners4) == 0) + resetWinPoint(candidates) +} + +// test samplingThreshold +func TestRandomSamplingWithoutReplacementSamplingThreshold(t *testing.T) { + candidates := newCandidates(100, func(i int) uint64 { return uint64(1000 * (i + 1)) }) + + for i := 1; i <= 100; i++ { + winners := RandomSamplingWithoutReplacement(0, candidates, i) + assert.True(t, len(winners) == i) + resetWinPoint(candidates) + } +} + +// test downscale of win point cases +func TestRandomSamplingWithoutReplacementDownscale(t *testing.T) { + candidates := newCandidates(10, func(i int) uint64 { + if i == 0 { + return math.MaxInt64 >> 1 + } + if i == 1 { + return 1 << 55 + } + if i == 3 { + return 1 << 54 + } + if i == 4 { + return 1 << 53 + } + return uint64(i) + }) + RandomSamplingWithoutReplacement(0, candidates, 5) +} + +// test random election should be 
deterministic +func TestRandomSamplingWithoutReplacementDeterministic(t *testing.T) { + candidates1 := newCandidates(100, func(i int) uint64 { return uint64(i + 1) }) + candidates2 := newCandidates(100, func(i int) uint64 { return uint64(i + 1) }) + for i := 1; i <= 100; i++ { + winners1 := RandomSamplingWithoutReplacement(uint64(i), candidates1, 50) + winners2 := RandomSamplingWithoutReplacement(uint64(i), candidates2, 50) + sameCandidates(winners1, winners2) + resetWinPoint(candidates1) + resetWinPoint(candidates2) + } +} + +func TestRandomSamplingWithoutReplacementIncludingZeroStakingPower(t *testing.T) { + // first candidate's priority is 0 + candidates1 := newCandidates(100, func(i int) uint64 { return uint64(i) }) + winners1 := RandomSamplingWithoutReplacement(0, candidates1, 100) + assert.True(t, len(winners1) == 99) + + candidates2 := newCandidates(100, func(i int) uint64 { + if i < 10 { + return 0 + } + return uint64(i) + }) + winners2 := RandomSamplingWithoutReplacement(0, candidates2, 95) + assert.True(t, len(winners2) == 90) +} + +func accumulateAndResetReward(candidate []Candidate, acc []uint64) { + for i, c := range candidate { + acc[i] += uint64(c.(*Element).winPoint) + c.(*Element).winPoint = 0 + } +} + +func TestDivider(t *testing.T) { + assert.True(t, divider.Uint64() == uint64Mask+1) +} + +func TestRandomThreshold(t *testing.T) { + loopCount := 100000 + + // randomThreshold() should not return a value greater than total. 
+ for i := 0; i < loopCount; i++ { + seed := rand.Uint64() + total := rand.Int63() + random := randomThreshold(&seed, uint64(total)) + assert.True(t, random < uint64(total)) + } + + // test randomness + total := math.MaxInt64 + bitHit := make([]int, 63) + for i := 0; i < loopCount; i++ { + seed := rand.Uint64() + random := randomThreshold(&seed, uint64(total)) + for j := 0; j < 63; j++ { + if random&(1< 0 { + bitHit[j]++ + } + } + } + // all bit hit count should be near at loopCount/2 + for i := 0; i < len(bitHit); i++ { + assert.True(t, math.Abs(float64(bitHit[i])-float64(loopCount/2))/float64(loopCount/2) < 0.01) + } + + // verify idempotence + expect := [][]uint64{ + {7070836379803831726, 3176749709313725329, 6607573645926202312, 3491641484182981082, 3795411888399561855}, + {1227844342346046656, 2900311180284727168, 8193302169476290588, 2343329048962716018, 6435608444680946564}, + {1682153688901572301, 5713119979229610871, 1690050691353843586, 6615539178087966730, 965357176598405746}, + {2092789425003139052, 7803713333738082738, 391680292209432075, 3242280302033391430, 2071067388247806529}, + {7958955049054603977, 5770386275058218277, 6648532499409218539, 5505026356475271777, 3466385424369377032}} + for i := 0; i < len(expect); i++ { + seed := uint64(i) + for j := 0; j < len(expect[i]); j++ { + seed = randomThreshold(&seed, uint64(total)) + assert.True(t, seed == expect[i][j]) + } + } +} + +// test reward fairness +func TestRandomSamplingWithoutReplacementReward(t *testing.T) { + candidates := newCandidates(100, func(i int) uint64 { return uint64(i + 1) }) + + accumulatedRewards := make([]uint64, 100) + for i := 0; i < 100000; i++ { + // 25 samplingThreshold is minimum to pass this test + // If samplingThreshold is less than 25, the result says the reward is not fair + RandomSamplingWithoutReplacement(uint64(i), candidates, 25) + accumulateAndResetReward(candidates, accumulatedRewards) + } + for i := 0; i < 99; i++ { + assert.True(t, accumulatedRewards[i] < 
accumulatedRewards[i+1]) + } + + accumulatedRewards = make([]uint64, 100) + for i := 0; i < 50000; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 50) + accumulateAndResetReward(candidates, accumulatedRewards) + } + for i := 0; i < 99; i++ { + assert.True(t, accumulatedRewards[i] < accumulatedRewards[i+1]) + } + + accumulatedRewards = make([]uint64, 100) + for i := 0; i < 10000; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 100) + accumulateAndResetReward(candidates, accumulatedRewards) + } + for i := 0; i < 99; i++ { + assert.True(t, accumulatedRewards[i] < accumulatedRewards[i+1]) + } +} + +/** +conditions for fair reward +1. even staking power(less difference between min staking and max staking) +2. large total staking(a small total staking power makes a large error when converting float into int) +3. many sampling count +4. loop count +*/ +func TestRandomSamplingWithoutReplacementEquity(t *testing.T) { + loopCount := 10000 + + // good condition + candidates := newCandidates(100, func(i int) uint64 { return 1000000 + rand.Uint64()&0xFFFFF }) + accumulatedRewards := make([]uint64, 100) + for i := 0; i < loopCount; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 99) + accumulateAndResetReward(candidates, accumulatedRewards) + } + for i := 0; i < 99; i++ { + rewardPerStakingDiff := + math.Abs(float64(accumulatedRewards[i])/float64(candidates[i].Priority())/float64(loopCount) - 1) + assert.True(t, rewardPerStakingDiff < 0.01) + } + + // ======================================================================================================= + // The codes below are not test codes to verify logic, + // but codes to find out what parameters are that weaken the equity of rewards. 
+ + // violation of condition 1 + candidates = newCandidates(100, func(i int) uint64 { return rand.Uint64() & 0xFFFFFFFFF }) + accumulatedRewards = make([]uint64, 100) + for i := 0; i < loopCount; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 99) + accumulateAndResetReward(candidates, accumulatedRewards) + } + maxRewardPerStakingDiff := float64(0) + for i := 0; i < 99; i++ { + rewardPerStakingDiff := + math.Abs(float64(accumulatedRewards[i])/float64(candidates[i].Priority())/float64(loopCount) - 1) + if maxRewardPerStakingDiff < rewardPerStakingDiff { + maxRewardPerStakingDiff = rewardPerStakingDiff + } + } + t.Logf("[! condition 1] max reward per staking difference: %f", maxRewardPerStakingDiff) + + // violation of condition 2 + candidates = newCandidates(100, func(i int) uint64 { return rand.Uint64() & 0xFFFFF }) + accumulatedRewards = make([]uint64, 100) + for i := 0; i < loopCount; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 99) + accumulateAndResetReward(candidates, accumulatedRewards) + } + maxRewardPerStakingDiff = float64(0) + for i := 0; i < 99; i++ { + rewardPerStakingDiff := + math.Abs(float64(accumulatedRewards[i])/float64(candidates[i].Priority())/float64(loopCount) - 1) + if maxRewardPerStakingDiff < rewardPerStakingDiff { + maxRewardPerStakingDiff = rewardPerStakingDiff + } + } + t.Logf("[! 
condition 2] max reward per staking difference: %f", maxRewardPerStakingDiff) + + // violation of condition 3 + candidates = newCandidates(100, func(i int) uint64 { return 1000000 + rand.Uint64()&0xFFFFF }) + accumulatedRewards = make([]uint64, 100) + for i := 0; i < loopCount; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 10) + accumulateAndResetReward(candidates, accumulatedRewards) + } + maxRewardPerStakingDiff = float64(0) + for i := 0; i < 99; i++ { + rewardPerStakingDiff := + math.Abs(float64(accumulatedRewards[i])/float64(candidates[i].Priority())/float64(loopCount) - 1) + if maxRewardPerStakingDiff < rewardPerStakingDiff { + maxRewardPerStakingDiff = rewardPerStakingDiff + } + } + t.Logf("[! condition 3] max reward per staking difference: %f", maxRewardPerStakingDiff) + + // violation of condition 4 + loopCount = 100 + candidates = newCandidates(100, func(i int) uint64 { return 1000000 + rand.Uint64()&0xFFFFF }) + accumulatedRewards = make([]uint64, 100) + for i := 0; i < loopCount; i++ { + RandomSamplingWithoutReplacement(uint64(i), candidates, 99) + accumulateAndResetReward(candidates, accumulatedRewards) + } + maxRewardPerStakingDiff = float64(0) + for i := 0; i < 99; i++ { + rewardPerStakingDiff := + math.Abs(float64(accumulatedRewards[i])/float64(candidates[i].Priority())/float64(loopCount) - 1) + if maxRewardPerStakingDiff < rewardPerStakingDiff { + maxRewardPerStakingDiff = rewardPerStakingDiff + } + } + t.Logf("[! 
condition 4] max reward per staking difference: %f", maxRewardPerStakingDiff) +} + +func TestRandomSamplingWithoutReplacementPanic(t *testing.T) { + type Case struct { + Candidates []Candidate + SamplingThreshold int + } + + cases := [...]*Case{ + // samplingThreshold is greater than the number of candidates + {newCandidates(9, func(i int) uint64 { return 10 }), 10}, + } + + for i, c := range cases { + func() { + defer func() { + if recover() == nil { + t.Errorf("expected panic didn't happen in case %d", i+1) + } + }() + RandomSamplingWithoutReplacement(0, c.Candidates, c.SamplingThreshold) + }() + } +} + func newCandidates(length int, prio func(int) uint64) (candidates []Candidate) { candidates = make([]Candidate, length) for i := 0; i < length; i++ { - candidates[i] = &Element{uint32(i), prio(i)} + candidates[i] = &Element{uint32(i), 0, prio(i)} } return } @@ -99,7 +400,10 @@ func sameCandidates(c1 []Candidate, c2 []Candidate) bool { s.Slice(c1, func(i, j int) bool { return c1[i].LessThan(c1[j]) }) s.Slice(c2, func(i, j int) bool { return c2[i].LessThan(c2[j]) }) for i := 0; i < len(c1); i++ { - if c1[i].(*Element).ID != c2[i].(*Element).ID { + if c1[i].(*Element).id != c2[i].(*Element).id { + return false + } + if c1[i].(*Element).winPoint != c2[i].(*Element).winPoint { return false } } diff --git a/libs/service/service.go b/libs/service/service.go index 9b3f36fff..f8358213b 100644 --- a/libs/service/service.go +++ b/libs/service/service.go @@ -130,12 +130,13 @@ func (bs *BaseService) SetLogger(l log.Logger) { func (bs *BaseService) Start() error { if atomic.CompareAndSwapUint32(&bs.started, 0, 1) { if atomic.LoadUint32(&bs.stopped) == 1 { - bs.Logger.Error(fmt.Sprintf("Not starting %v -- already stopped", bs.name), "impl", bs.impl) + bs.Logger.Error(fmt.Sprintf("Not starting %v service -- already stopped", bs.name), + "impl", bs.impl) // revert flag atomic.StoreUint32(&bs.started, 0) return ErrAlreadyStopped } - bs.Logger.Info(fmt.Sprintf("Starting %v", 
bs.name), "impl", bs.impl) + bs.Logger.Info(fmt.Sprintf("Starting %v service", bs.name), "impl", bs.impl) err := bs.impl.OnStart() if err != nil { // revert flag @@ -144,7 +145,7 @@ func (bs *BaseService) Start() error { } return nil } - bs.Logger.Debug(fmt.Sprintf("Not starting %v -- already started", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Not starting %v service -- already started", bs.name), "impl", bs.impl) return ErrAlreadyStarted } @@ -158,17 +159,18 @@ func (bs *BaseService) OnStart() error { return nil } func (bs *BaseService) Stop() error { if atomic.CompareAndSwapUint32(&bs.stopped, 0, 1) { if atomic.LoadUint32(&bs.started) == 0 { - bs.Logger.Error(fmt.Sprintf("Not stopping %v -- have not been started yet", bs.name), "impl", bs.impl) + bs.Logger.Error(fmt.Sprintf("Not stopping %v service -- has not been started yet", bs.name), + "impl", bs.impl) // revert flag atomic.StoreUint32(&bs.stopped, 0) return ErrNotStarted } - bs.Logger.Info(fmt.Sprintf("Stopping %v", bs.name), "impl", bs.impl) + bs.Logger.Info(fmt.Sprintf("Stopping %v service", bs.name), "impl", bs.impl) bs.impl.OnStop() close(bs.quit) return nil } - bs.Logger.Debug(fmt.Sprintf("Stopping %v (ignoring: already stopped)", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Stopping %v service (already stopped)", bs.name), "impl", bs.impl) return ErrAlreadyStopped } @@ -181,7 +183,7 @@ func (bs *BaseService) OnStop() {} // will be returned if the service is running. func (bs *BaseService) Reset() error { if !atomic.CompareAndSwapUint32(&bs.stopped, 1, 0) { - bs.Logger.Debug(fmt.Sprintf("Can't reset %v. Not stopped", bs.name), "impl", bs.impl) + bs.Logger.Debug(fmt.Sprintf("Can't reset %v service. 
Not stopped", bs.name), "impl", bs.impl) return fmt.Errorf("can't reset running %s", bs.name) } diff --git a/lite/base_verifier.go b/lite/base_verifier.go index 6a2a50ab5..b7fb4cb4d 100644 --- a/lite/base_verifier.go +++ b/lite/base_verifier.go @@ -12,26 +12,26 @@ import ( var _ Verifier = (*BaseVerifier)(nil) // BaseVerifier lets us check the validity of SignedHeaders at height or -// later, requiring sufficient votes (> 2/3) from the given valset. +// later, requiring sufficient votes (> 2/3) from the given voterSet. // To verify blocks produced by a blockchain with mutable validator sets, // use the DynamicVerifier. // TODO: Handle unbonding time. type BaseVerifier struct { - chainID string - height int64 - valset *types.ValidatorSet + chainID string + height int64 + voterSet *types.VoterSet } // NewBaseVerifier returns a new Verifier initialized with a validator set at // some height. -func NewBaseVerifier(chainID string, height int64, valset *types.ValidatorSet) *BaseVerifier { +func NewBaseVerifier(chainID string, height int64, valset *types.VoterSet) *BaseVerifier { if valset.IsNilOrEmpty() { - panic("NewBaseVerifier requires a valid valset") + panic("NewBaseVerifier requires a valid voterSet") } return &BaseVerifier{ - chainID: chainID, - height: height, - valset: valset, + chainID: chainID, + height: height, + voterSet: valset, } } @@ -56,9 +56,9 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { } // We can't verify with the wrong validator set. - if !bytes.Equal(signedHeader.ValidatorsHash, - bv.valset.Hash()) { - return lerr.ErrUnexpectedValidators(signedHeader.ValidatorsHash, bv.valset.Hash()) + if !bytes.Equal(signedHeader.VotersHash, + bv.voterSet.Hash()) { + return lerr.ErrUnexpectedValidators(signedHeader.VotersHash, bv.voterSet.Hash()) } // Do basic sanity checks. @@ -68,7 +68,7 @@ func (bv *BaseVerifier) Verify(signedHeader types.SignedHeader) error { } // Check commit signatures. 
- err = bv.valset.VerifyCommit( + err = bv.voterSet.VerifyCommit( bv.chainID, signedHeader.Commit.BlockID, signedHeader.Height, signedHeader.Commit) if err != nil { diff --git a/lite/base_verifier_test.go b/lite/base_verifier_test.go index 2ef1203fb..f52c3df7d 100644 --- a/lite/base_verifier_test.go +++ b/lite/base_verifier_test.go @@ -14,14 +14,14 @@ func TestBaseCert(t *testing.T) { keys := genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals := keys.ToValidators(20, 10) + vals := types.ToVoterAll(keys.ToValidators(20, 10).Validators) // and a Verifier based on our known set chainID := "test-static" cert := NewBaseVerifier(chainID, 2, vals) cases := []struct { keys privKeys - vals *types.ValidatorSet + vals *types.VoterSet height int64 first, last int // who actually signs proper bool // true -> expect no error @@ -37,7 +37,8 @@ func TestBaseCert(t *testing.T) { {keys, vals, 4, 0, len(keys) - 1, false, false}, // Changing the power a little bit breaks the static validator. // The sigs are enough, but the validator hash is unknown. - {keys, keys.ToValidators(20, 11), 5, 0, len(keys), false, true}, + {keys, types.ToVoterAll(keys.ToValidators(20, 11).Validators), + 5, 0, len(keys), false, true}, } for _, tc := range cases { diff --git a/lite/client/provider.go b/lite/client/provider.go index e24dbe0e4..5122eafa6 100644 --- a/lite/client/provider.go +++ b/lite/client/provider.go @@ -1,5 +1,5 @@ /* -Package client defines a provider that uses a rpcclient +Package client defines a provider that uses a rpchttp to get information, which is used to get new headers and validators directly from a Tendermint client. 
*/ @@ -12,6 +12,7 @@ import ( "github.com/tendermint/tendermint/lite" lerr "github.com/tendermint/tendermint/lite/errors" rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ctypes "github.com/tendermint/tendermint/rpc/core/types" "github.com/tendermint/tendermint/types" ) @@ -40,7 +41,7 @@ func NewProvider(chainID string, client SignStatusClient) lite.Provider { // NewHTTPProvider can connect to a tendermint json-rpc endpoint // at the given url, and uses that as a read-only provider. func NewHTTPProvider(chainID, remote string) (lite.Provider, error) { - httpClient, err := rpcclient.NewHTTP(remote, "/websocket") + httpClient, err := rpchttp.New(remote, "/websocket") if err != nil { return nil, err } @@ -97,11 +98,11 @@ func (p *provider) fetchLatestCommit(minHeight int64, maxHeight int64) (*ctypes. } // Implements Provider. -func (p *provider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return p.getValidatorSet(chainID, height) +func (p *provider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { + return p.getVoterSet(chainID, height) } -func (p *provider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { +func (p *provider) getVoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { if chainID != p.chainID { err = fmt.Errorf("expected chainID %s, got %s", p.chainID, chainID) return @@ -110,12 +111,15 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. err = fmt.Errorf("expected height >= 1, got height %v", height) return } - res, err := p.client.Validators(&height, 0, 0) + + var res *ctypes.ResultVoters + res, err = p.client.Voters(&height, 0, 0) + if err != nil { // TODO pass through other types of errors. 
return nil, lerr.ErrUnknownValidators(chainID, height) } - valset = types.NewValidatorSet(res.Validators) + valset = types.WrapValidatorsToVoterSet(res.Voters) return } @@ -123,13 +127,13 @@ func (p *provider) getValidatorSet(chainID string, height int64) (valset *types. func (p *provider) fillFullCommit(signedHeader types.SignedHeader) (fc lite.FullCommit, err error) { // Get the validators. - valset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height) + valset, err := p.getVoterSet(signedHeader.ChainID, signedHeader.Height) if err != nil { return lite.FullCommit{}, err } // Get the next validators. - nextValset, err := p.getValidatorSet(signedHeader.ChainID, signedHeader.Height+1) + nextValset, err := p.getVoterSet(signedHeader.ChainID, signedHeader.Height+1) if err != nil { return lite.FullCommit{}, err } diff --git a/lite/commit.go b/lite/commit.go index 6cd354173..e808a0d0f 100644 --- a/lite/commit.go +++ b/lite/commit.go @@ -14,48 +14,48 @@ import ( // revert to block-by-block updating of lite Verifier's latest validator set, // even in the face of arbitrarily large power changes. type FullCommit struct { - SignedHeader types.SignedHeader `json:"signed_header"` - Validators *types.ValidatorSet `json:"validator_set"` - NextValidators *types.ValidatorSet `json:"next_validator_set"` + SignedHeader types.SignedHeader `json:"signed_header"` + Voters *types.VoterSet `json:"voter_set"` + NextVoters *types.VoterSet `json:"next_validator_set"` } // NewFullCommit returns a new FullCommit. -func NewFullCommit(signedHeader types.SignedHeader, valset, nextValset *types.ValidatorSet) FullCommit { +func NewFullCommit(signedHeader types.SignedHeader, voterSet, nextVoterSet *types.VoterSet) FullCommit { return FullCommit{ - SignedHeader: signedHeader, - Validators: valset, - NextValidators: nextValset, + SignedHeader: signedHeader, + Voters: voterSet, + NextVoters: nextVoterSet, } } // Validate the components and check for consistency. 
-// This also checks to make sure that Validators actually +// This also checks to make sure that Voters actually // signed the SignedHeader.Commit. -// If > 2/3 did not sign the Commit from fc.Validators, it +// If > 2/3 did not sign the Commit from fc.Voters, it // is not a valid commit! func (fc FullCommit) ValidateFull(chainID string) error { // Ensure that Validators exists and matches the header. - if fc.Validators.Size() == 0 { - return errors.New("need FullCommit.Validators") + if fc.Voters.Size() == 0 { + return errors.New("need FullCommit.Voters") } if !bytes.Equal( - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash()) { - return fmt.Errorf("header has vhash %X but valset hash is %X", - fc.SignedHeader.ValidatorsHash, - fc.Validators.Hash(), + fc.SignedHeader.VotersHash, + fc.Voters.Hash()) { + return fmt.Errorf("header has vhash %X but voterSet hash is %X", + fc.SignedHeader.VotersHash, + fc.Voters.Hash(), ) } // Ensure that NextValidators exists and matches the header. - if fc.NextValidators.Size() == 0 { + if fc.NextVoters.Size() == 0 { return errors.New("need FullCommit.NextValidators") } if !bytes.Equal( - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash()) { - return fmt.Errorf("header has next vhash %X but next valset hash is %X", - fc.SignedHeader.NextValidatorsHash, - fc.NextValidators.Hash(), + fc.SignedHeader.NextVotersHash, + fc.NextVoters.Hash()) { + return fmt.Errorf("header has next vhash %X but next voterSet hash is %X", + fc.SignedHeader.NextVotersHash, + fc.NextVoters.Hash(), ) } // Validate the header. @@ -65,7 +65,7 @@ func (fc FullCommit) ValidateFull(chainID string) error { } // Validate the signatures on the commit. 
hdr, cmt := fc.SignedHeader.Header, fc.SignedHeader.Commit - return fc.Validators.VerifyCommit( + return fc.Voters.VerifyCommit( hdr.ChainID, cmt.BlockID, hdr.Height, cmt) } diff --git a/lite/dbprovider.go b/lite/dbprovider.go index 35f7270ae..ca27f6016 100644 --- a/lite/dbprovider.go +++ b/lite/dbprovider.go @@ -6,11 +6,12 @@ import ( "strconv" amino "github.com/tendermint/go-amino" + dbm "github.com/tendermint/tm-db" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) var _ PersistentProvider = (*DBProvider)(nil) @@ -59,16 +60,16 @@ func (dbp *DBProvider) SaveFullCommit(fc FullCommit) error { // Save the fc.validators. // We might be overwriting what we already have, but // it makes the logic easier for now. - vsKey := validatorSetKey(fc.ChainID(), fc.Height()) - vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Validators) + vsKey := voterSetKey(fc.ChainID(), fc.Height()) + vsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.Voters) if err != nil { return err } batch.Set(vsKey, vsBz) // Save the fc.NextValidators. 
- nvsKey := validatorSetKey(fc.ChainID(), fc.Height()+1) - nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextValidators) + nvsKey := voterSetKey(fc.ChainID(), fc.Height()+1) + nvsBz, err := dbp.cdc.MarshalBinaryLengthPrefixed(fc.NextVoters) if err != nil { return err } @@ -148,12 +149,12 @@ func (dbp *DBProvider) LatestFullCommit(chainID string, minHeight, maxHeight int return FullCommit{}, lerr.ErrCommitNotFound() } -func (dbp *DBProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - return dbp.getValidatorSet(chainID, height) +func (dbp *DBProvider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { + return dbp.getVoterSet(chainID, height) } -func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { - vsBz, err := dbp.db.Get(validatorSetKey(chainID, height)) +func (dbp *DBProvider) getVoterSet(chainID string, height int64) (voterSet *types.VoterSet, err error) { + vsBz, err := dbp.db.Get(voterSetKey(chainID, height)) if err != nil { return nil, err } @@ -161,38 +162,37 @@ func (dbp *DBProvider) getValidatorSet(chainID string, height int64) (valset *ty err = lerr.ErrUnknownValidators(chainID, height) return } - err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &valset) + err = dbp.cdc.UnmarshalBinaryLengthPrefixed(vsBz, &voterSet) if err != nil { return } - // To test deep equality. This makes it easier to test for e.g. valset + // To test deep equality. This makes it easier to test for e.g. voterSet // equivalence using assert.Equal (tests for deep equality) in our tests, // which also tests for unexported/private field equivalence. 
- valset.TotalVotingPower() - + voterSet.TotalVotingPower() return } func (dbp *DBProvider) fillFullCommit(sh types.SignedHeader) (FullCommit, error) { var chainID = sh.ChainID var height = sh.Height - var valset, nextValset *types.ValidatorSet + var valset, nextValset *types.VoterSet // Load the validator set. - valset, err := dbp.getValidatorSet(chainID, height) + valset, err := dbp.getVoterSet(chainID, height) if err != nil { return FullCommit{}, err } // Load the next validator set. - nextValset, err = dbp.getValidatorSet(chainID, height+1) + nextValset, err = dbp.getVoterSet(chainID, height+1) if err != nil { return FullCommit{}, err } // Return filled FullCommit. return FullCommit{ - SignedHeader: sh, - Validators: valset, - NextValidators: nextValset, + SignedHeader: sh, + Voters: valset, + NextVoters: nextValset, }, nil } @@ -243,7 +243,7 @@ func signedHeaderKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/sh", chainID, height)) } -func validatorSetKey(chainID string, height int64) []byte { +func voterSetKey(chainID string, height int64) []byte { return []byte(fmt.Sprintf("%s/%010d/vs", chainID, height)) } diff --git a/lite/dynamic_verifier.go b/lite/dynamic_verifier.go index d4efdcbeb..7c27d7635 100644 --- a/lite/dynamic_verifier.go +++ b/lite/dynamic_verifier.go @@ -107,7 +107,7 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { } // Get the latest known full commit <= h-1 from our trusted providers. - // The full commit at h-1 contains the valset to sign for h. + // The full commit at h-1 contains the voterSet to sign for h. 
prevHeight := shdr.Height - 1 trustedFC, err := dv.trusted.LatestFullCommit(dv.chainID, 1, prevHeight) if err != nil { @@ -115,38 +115,38 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { } // sync up to the prevHeight and assert our latest NextValidatorSet - // is the ValidatorSet for the SignedHeader + // is the VoterSet for the SignedHeader if trustedFC.Height() == prevHeight { - // Return error if valset doesn't match. + // Return error if voterSet doesn't match. if !bytes.Equal( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { return lerr.ErrUnexpectedValidators( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) } } else { - // If valset doesn't match, try to update + // If voterSet doesn't match, try to update if !bytes.Equal( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { // ... update. trustedFC, err = dv.updateToHeight(prevHeight) if err != nil { return err } - // Return error if valset _still_ doesn't match. - if !bytes.Equal(trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) { + // Return error if voterSet _still_ doesn't match. + if !bytes.Equal(trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) { return lerr.ErrUnexpectedValidators( - trustedFC.NextValidators.Hash(), - shdr.Header.ValidatorsHash) + trustedFC.NextVoters.Hash(), + shdr.Header.VotersHash) } } } - // Verify the signed header using the matching valset. - cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextValidators) + // Verify the signed header using the matching voterSet. 
+ cert := NewBaseVerifier(dv.chainID, trustedFC.Height()+1, trustedFC.NextVoters) err = cert.Verify(shdr) if err != nil { return err @@ -160,7 +160,7 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { // See https://github.com/tendermint/tendermint/issues/3174. // Get the next validator set. - nextValset, err := dv.source.ValidatorSet(dv.chainID, shdr.Height+1) + nextValset, err := dv.source.VoterSet(dv.chainID, shdr.Height+1) if lerr.IsErrUnknownValidators(err) { // Ignore this error. return nil @@ -170,9 +170,9 @@ func (dv *DynamicVerifier) Verify(shdr types.SignedHeader) error { // Create filled FullCommit. nfc := FullCommit{ - SignedHeader: shdr, - Validators: trustedFC.NextValidators, - NextValidators: nextValset, + SignedHeader: shdr, + Voters: trustedFC.NextVoters, + NextVoters: nextValset, } // Validate the full commit. This checks the cryptographic // signatures of Commit against Validators. @@ -191,8 +191,8 @@ func (dv *DynamicVerifier) verifyAndSave(trustedFC, sourceFC FullCommit) error { if trustedFC.Height() >= sourceFC.Height() { panic("should not happen") } - err := trustedFC.NextValidators.VerifyFutureCommit( - sourceFC.Validators, + err := trustedFC.NextVoters.VerifyFutureCommit( + sourceFC.Voters, dv.chainID, sourceFC.SignedHeader.Commit.BlockID, sourceFC.SignedHeader.Height, sourceFC.SignedHeader.Commit, ) diff --git a/lite/dynamic_verifier_test.go b/lite/dynamic_verifier_test.go index 441010efb..abb7d3cc5 100644 --- a/lite/dynamic_verifier_test.go +++ b/lite/dynamic_verifier_test.go @@ -8,9 +8,10 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const testChainID = "inquiry-test" @@ -32,8 +33,8 @@ func TestInquirerValidPath(t *testing.T) { count := 50 fcz := make([]FullCommit, count) for i := 0; i < count; 
i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0).Validators) + nextVals := types.ToVoterAll(nkeys.ToValidators(vote, 0).Validators) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) fcz[i] = keys.GenFullCommit( @@ -77,7 +78,7 @@ func TestDynamicVerify(t *testing.T) { trust := NewDBProvider("trust", dbm.NewMemDB()) source := NewDBProvider("source", dbm.NewMemDB()) - // 10 commits with one valset, 1 to change, + // 10 commits with one voterSet, 1 to change, // 10 commits with the next one n1, n2 := 10, 10 nCommits := n1 + n2 + 1 @@ -88,9 +89,9 @@ func TestDynamicVerify(t *testing.T) { chainID := "dynamic-verifier" power := int64(10) keys1 := genPrivKeys(5) - vals1 := keys1.ToValidators(power, 0) + vals1 := types.ToVoterAll(keys1.ToValidators(power, 0).Validators) keys2 := genPrivKeys(5) - vals2 := keys2.ToValidators(power, 0) + vals2 := types.ToVoterAll(keys2.ToValidators(power, 0).Validators) // make some commits with the first for i := 0; i < n1; i++ { @@ -126,7 +127,7 @@ func TestDynamicVerify(t *testing.T) { } -func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.ValidatorSet, chainID string) FullCommit { +func makeFullCommit(height int64, keys privKeys, vals, nextVals *types.VoterSet, chainID string) FullCommit { height++ consHash := []byte("special-params") appHash := []byte(fmt.Sprintf("h=%d", height)) @@ -153,8 +154,8 @@ func TestInquirerVerifyHistorical(t *testing.T) { consHash := []byte("special-params") fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0).Validators) + nextVals := types.ToVoterAll(nkeys.ToValidators(vote, 0).Validators) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) @@ -236,8 +237,8 @@ func TestConcurrencyInquirerVerify(t 
*testing.T) { consHash := []byte("special-params") fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(vote, 0) - nextVals := nkeys.ToValidators(vote, 0) + vals := types.ToVoterAll(keys.ToValidators(vote, 0).Validators) + nextVals := types.ToVoterAll(nkeys.ToValidators(vote, 0).Validators) h := int64(1 + i) appHash := []byte(fmt.Sprintf("h=%d", h)) resHash := []byte(fmt.Sprintf("res=%d", h)) diff --git a/lite/helpers.go b/lite/helpers.go index 29dd50b5b..5665d7250 100644 --- a/lite/helpers.go +++ b/lite/helpers.go @@ -56,7 +56,7 @@ func (pkz privKeys) ExtendSecp(n int) privKeys { return append(pkz, extra...) } -// ToValidators produces a valset from the set of keys. +// ToValidators produces a voterSet from the set of keys. // The first key has weight `init` and it increases by `inc` every step // so we can have all the same weight, or a simple linear distribution // (should be enough for testing). @@ -117,7 +117,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, key crypto.PrivK } func genHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, @@ -125,18 +125,18 @@ func genHeader(chainID string, height int64, txs types.Txs, Time: tmtime.Now(), // LastBlockID // LastCommitHash - ValidatorsHash: valset.Hash(), - NextValidatorsHash: nextValset.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + VotersHash: valset.Hash(), + NextVotersHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. 
func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) types.SignedHeader { header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) check := types.SignedHeader{ @@ -148,7 +148,7 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, txs types.Txs, // GenFullCommit calls genHeader and signHeader and combines them into a FullCommit. func (pkz privKeys) GenFullCommit(chainID string, height int64, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) FullCommit { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) FullCommit { header := genHeader(chainID, height, txs, valset, nextValset, appHash, consHash, resHash) commit := types.SignedHeader{ diff --git a/lite/multiprovider.go b/lite/multiprovider.go index 364647a40..704dc5e60 100644 --- a/lite/multiprovider.go +++ b/lite/multiprovider.go @@ -71,11 +71,11 @@ func (mc *multiProvider) LatestFullCommit(chainID string, minHeight, maxHeight i return } -// ValidatorSet returns validator set at height as provided by the first +// VoterSet returns validator set at height as provided by the first // provider which has it, or an error otherwise. -func (mc *multiProvider) ValidatorSet(chainID string, height int64) (valset *types.ValidatorSet, err error) { +func (mc *multiProvider) VoterSet(chainID string, height int64) (valset *types.VoterSet, err error) { for _, p := range mc.providers { - valset, err = p.ValidatorSet(chainID, height) + valset, err = p.VoterSet(chainID, height) if err == nil { // TODO Log unexpected types of errors. 
return valset, nil diff --git a/lite/provider.go b/lite/provider.go index ebab16264..571fe9d93 100644 --- a/lite/provider.go +++ b/lite/provider.go @@ -14,9 +14,9 @@ type Provider interface { // If maxHeight is zero, returns the latest where minHeight <= height. LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) - // Get the valset that corresponds to chainID and height and return. + // Get the voterSet that corresponds to chainID and height and return. // Height must be >= 1. - ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) + VoterSet(chainID string, height int64) (*types.VoterSet, error) // Set a logger. SetLogger(logger log.Logger) diff --git a/lite/provider_test.go b/lite/provider_test.go index 98fff8cb4..31d5fe199 100644 --- a/lite/provider_test.go +++ b/lite/provider_test.go @@ -7,10 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" lerr "github.com/tendermint/tendermint/lite/errors" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // missingProvider doesn't store anything, always a miss. @@ -26,8 +27,8 @@ func (missingProvider) SaveFullCommit(FullCommit) error { return nil } func (missingProvider) LatestFullCommit(chainID string, minHeight, maxHeight int64) (FullCommit, error) { return FullCommit{}, lerr.ErrCommitNotFound() } -func (missingProvider) ValidatorSet(chainID string, height int64) (*types.ValidatorSet, error) { - return nil, errors.New("missing validator set") +func (missingProvider) VoterSet(chainID string, height int64) (*types.VoterSet, error) { + return nil, errors.New("missing voter set") } func (missingProvider) SetLogger(_ log.Logger) {} @@ -54,7 +55,7 @@ func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { // Make a bunch of full commits. 
fcz := make([]FullCommit, count) for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) + vals := types.ToVoterAll(keys.ToValidators(10, int64(count/2)).Validators) h := int64(20 + 10*i) fcz[i] = keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) } @@ -72,8 +73,8 @@ func checkProvider(t *testing.T, p PersistentProvider, chainID, app string) { fc2, err := p.LatestFullCommit(chainID, fc.Height(), fc.Height()) assert.Nil(err) assert.Equal(fc.SignedHeader, fc2.SignedHeader) - assert.Equal(fc.Validators, fc2.Validators) - assert.Equal(fc.NextValidators, fc2.NextValidators) + assert.Equal(fc.Voters, fc2.Voters) + assert.Equal(fc.NextVoters, fc2.NextVoters) } // Make sure we get the last hash if we overstep. @@ -118,7 +119,7 @@ func TestMultiLatestFullCommit(t *testing.T) { // Set a bunch of full commits. for i := 0; i < count; i++ { - vals := keys.ToValidators(10, int64(count/2)) + vals := types.ToVoterAll(keys.ToValidators(10, int64(count/2)).Validators) h := int64(10 * (i + 1)) fc := keys.GenFullCommit(chainID, h, nil, vals, vals, appHash, []byte("params"), []byte("results"), 0, 5) err := p2.SaveFullCommit(fc) diff --git a/lite/proxy/proxy.go b/lite/proxy/proxy.go index 5fb51f0b3..53a28a3f1 100644 --- a/lite/proxy/proxy.go +++ b/lite/proxy/proxy.go @@ -132,9 +132,9 @@ func makeTxFunc(c rpcclient.Client) func(ctx *rpctypes.Context, hash []byte, pro func makeValidatorsFunc(c rpcclient.Client) func( ctx *rpctypes.Context, height *int64, -) (*ctypes.ResultValidators, error) { - return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultValidators, error) { - return c.Validators(height, 0, 0) +) (*ctypes.ResultVoters, error) { + return func(ctx *rpctypes.Context, height *int64) (*ctypes.ResultVoters, error) { + return c.Voters(height, 0, 0) } } diff --git a/lite/proxy/query_test.go b/lite/proxy/query_test.go index 34d9d1d4c..21afd48cb 100644 --- a/lite/proxy/query_test.go +++ 
b/lite/proxy/query_test.go @@ -15,6 +15,7 @@ import ( certclient "github.com/tendermint/tendermint/lite/client" nm "github.com/tendermint/tendermint/node" "github.com/tendermint/tendermint/rpc/client" + rpclocal "github.com/tendermint/tendermint/rpc/client/local" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) @@ -47,14 +48,14 @@ func _TestAppProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) prt := defaultProofRuntime() - cl := client.NewLocal(node) + cl := rpclocal.New(node) client.WaitForHeight(cl, 1, nil) // This sets up our trust on the node based on some past point. source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, 1, 1) require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Voters) // Wait for tx confirmation. done := make(chan int64) @@ -126,7 +127,7 @@ func _TestAppProofs(t *testing.T) { func TestTxProofs(t *testing.T) { assert, require := assert.New(t), require.New(t) - cl := client.NewLocal(node) + cl := rpclocal.New(node) client.WaitForHeight(cl, 1, nil) tx := kvstoreTx([]byte("key-a"), []byte("value-a")) @@ -139,7 +140,7 @@ func TestTxProofs(t *testing.T) { source := certclient.NewProvider(chainID, cl) seed, err := source.LatestFullCommit(chainID, brh-2, brh-2) require.NoError(err, "%#v", err) - cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Validators) + cert := lite.NewBaseVerifier(chainID, seed.Height(), seed.Voters) // First let's make sure a bogus transaction hash returns a valid non-existence proof. 
key := types.Tx([]byte("bogus")).Hash() diff --git a/lite/proxy/validate_test.go b/lite/proxy/validate_test.go index cf9a0de6b..a47741848 100644 --- a/lite/proxy/validate_test.go +++ b/lite/proxy/validate_test.go @@ -18,9 +18,9 @@ var ( ) var hdrHeight11 = types.Header{ - Height: 11, - Time: testTime1, - ValidatorsHash: []byte("Tendermint"), + Height: 11, + Time: testTime1, + VotersHash: []byte("Tendermint"), } func TestValidateBlock(t *testing.T) { @@ -143,8 +143,8 @@ func TestValidateBlockMeta(t *testing.T) { { meta: &types.BlockMeta{ Header: types.Header{ - Height: 11, - ValidatorsHash: []byte("lite-test"), + Height: 11, + VotersHash: []byte("lite-test"), // TODO: should be able to use empty time after Amino upgrade Time: testTime1, }, @@ -159,15 +159,15 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{ Header: types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime1, + VotersHash: []byte("Tendermint"), + Time: testTime1, }, }, signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, + VotersHash: []byte("Tendermint"), + Time: testTime2, }, Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, @@ -178,15 +178,15 @@ func TestValidateBlockMeta(t *testing.T) { meta: &types.BlockMeta{ Header: types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint"), - Time: testTime2, + VotersHash: []byte("Tendermint"), + Time: testTime2, }, }, signedHeader: types.SignedHeader{ Header: &types.Header{ Height: 11, DataHash: deadBeefHash, - ValidatorsHash: []byte("Tendermint-x"), - Time: testTime2, + VotersHash: []byte("Tendermint-x"), + Time: testTime2, }, Commit: types.NewCommit(11, 0, types.BlockID{Hash: []byte("DEADBEEF")}, nil), }, diff --git a/lite/proxy/verifier.go b/lite/proxy/verifier.go index b5fc3af3f..5486a3ea9 100644 --- a/lite/proxy/verifier.go +++ 
b/lite/proxy/verifier.go @@ -3,10 +3,11 @@ package proxy import ( "github.com/pkg/errors" + dbm "github.com/tendermint/tm-db" + log "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/lite" lclient "github.com/tendermint/tendermint/lite/client" - dbm "github.com/tendermint/tm-db" ) func NewVerifier( diff --git a/lite2/client.go b/lite2/client.go index b5ffca78c..29c380eb6 100644 --- a/lite2/client.go +++ b/lite2/client.go @@ -24,6 +24,17 @@ const ( defaultPruningSize = 1000 defaultMaxRetryAttempts = 10 + // For bisection, when using the cache of headers from the previous batch, + // they will always be at a height greater than 1/2 (normal bisection) so to + // find something in between the range, 9/16 is used. + bisectionNumerator = 9 + bisectionDenominator = 16 + + // 10s should cover most of the clients. + // References: + // - http://vancouver-webpages.com/time/web.html + // - https://blog.codinghorror.com/keeping-time-on-the-pc/ + defaultMaxClockDrift = 10 * time.Second ) // Option sets a parameter for the light client. @@ -39,11 +50,11 @@ func SequentialVerification() Option { } // SkippingVerification option configures the light client to skip headers as -// long as {trustLevel} of the old validator set signed the new header. The +// long as {trustLevel} of the old voter set signed the new header. The // bisection algorithm from the specification is used for finding the minimal // "trust path". // -// trustLevel - fraction of the old validator set (in terms of voting power), +// trustLevel - fraction of the old voter set (in terms of voting power), // which must sign the new header in order for us to trust it. NOTE this only // applies to non-adjacent headers. For adjacent headers, sequential // verification is used. 
@@ -54,9 +65,9 @@ func SkippingVerification(trustLevel tmmath.Fraction) Option { } } -// PruningSize option sets the maximum amount of headers & validator set pairs +// PruningSize option sets the maximum amount of headers & voter set pairs // that the light client stores. When Prune() is run, all headers (along with -// the associated validator sets) that are earlier than the h amount of headers +// the associated voter sets) that are earlier than the h amount of headers // will be removed from the store. Default: 1000. A pruning size of 0 will not // prune the lite client at all. func PruningSize(h uint16) Option { @@ -89,6 +100,14 @@ func MaxRetryAttempts(max uint16) Option { } } +// MaxClockDrift defines how much new (untrusted) header's Time can drift into +// the future. Default: 10s. +func MaxClockDrift(d time.Duration) Option { + return func(c *Client) { + c.maxClockDrift = d + } +} + // Client represents a light client, connected to a single chain, which gets // headers from a primary provider, verifies them either sequentially or by // skipping some and stores them in a trusted store (usually, a local FS). @@ -100,6 +119,7 @@ type Client struct { verificationMode mode trustLevel tmmath.Fraction maxRetryAttempts uint16 // see MaxRetryAttempts option + maxClockDrift time.Duration // Mutex for locking during changes of the lite clients providers providerMutex sync.Mutex @@ -112,8 +132,8 @@ type Client struct { trustedStore store.Store // Highest trusted header from the store (height=H). latestTrustedHeader *types.SignedHeader - // Highest validator set from the store (height=H). - latestTrustedVals *types.ValidatorSet + // Highest voter set from the store (height=H). 
+ latestTrustedVoters *types.VoterSet // See RemoveNoLongerTrustedHeadersPeriod option pruningSize uint16 @@ -145,7 +165,7 @@ func NewClient( options ...Option) (*Client, error) { if err := trustOptions.ValidateBasic(); err != nil { - return nil, errors.Wrap(err, "invalid TrustOptions") + return nil, fmt.Errorf("invalid TrustOptions: %w", err) } c, err := NewClientFromTrustedStore(chainID, trustOptions.Period, primary, witnesses, trustedStore, options...) @@ -187,6 +207,7 @@ func NewClientFromTrustedStore( verificationMode: skipping, trustLevel: DefaultTrustLevel, maxRetryAttempts: defaultMaxRetryAttempts, + maxClockDrift: defaultMaxClockDrift, primary: primary, witnesses: witnesses, trustedStore: trustedStore, @@ -202,13 +223,13 @@ func NewClientFromTrustedStore( // Validate the number of witnesses. if len(c.witnesses) < 1 { - return nil, errors.New("expected at least one witness") + return nil, errNoWitnesses{} } // Verify witnesses are all on the same chain. for i, w := range witnesses { if w.ChainID() != chainID { - return nil, errors.Errorf("witness #%d: %v is on another chain %s, expected %s", + return nil, fmt.Errorf("witness #%d: %v is on another chain %s, expected %s", i, w, w.ChainID(), chainID) } } @@ -230,24 +251,24 @@ func NewClientFromTrustedStore( func (c *Client) restoreTrustedHeaderAndVals() error { lastHeight, err := c.trustedStore.LastSignedHeaderHeight() if err != nil { - return errors.Wrap(err, "can't get last trusted header height") + return fmt.Errorf("can't get last trusted header height: %w", err) } if lastHeight > 0 { trustedHeader, err := c.trustedStore.SignedHeader(lastHeight) if err != nil { - return errors.Wrap(err, "can't get last trusted header") + return fmt.Errorf("can't get last trusted header: %w", err) } - trustedVals, err := c.trustedStore.ValidatorSet(lastHeight) + trustedVals, err := c.trustedStore.VoterSet(lastHeight) if err != nil { - return errors.Wrap(err, "can't get last trusted validators") + return fmt.Errorf("can't 
get last trusted voters: %w", err) } c.latestTrustedHeader = trustedHeader - c.latestTrustedVals = trustedVals + c.latestTrustedVoters = trustedVals - c.logger.Info("Restored trusted header and vals", "height", lastHeight) + c.logger.Info("Restored trusted header and voters", "height", lastHeight) } return nil @@ -296,7 +317,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(options TrustOptions) error { // remove all the headers (options.Height, trustedHeader.Height] err := c.cleanupAfter(options.Height) if err != nil { - return errors.Wrapf(err, "cleanupAfter(%d)", options.Height) + return fmt.Errorf("cleanupAfter(%d): %w", options.Height, err) } c.logger.Info("Rolled back to older header (newer headers were removed)", @@ -318,7 +339,7 @@ func (c *Client) checkTrustedHeaderUsingOptions(options TrustOptions) error { if c.confirmationFn(action) { err := c.Cleanup() if err != nil { - return errors.Wrap(err, "failed to cleanup") + return fmt.Errorf("failed to cleanup: %w", err) } } else { return errors.New("refused to remove the stored headers despite hashes mismatch") @@ -346,7 +367,7 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { } if !bytes.Equal(h.Hash(), options.Hash) { - return errors.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash()) + return fmt.Errorf("expected header's hash %X, but got %X", options.Hash, h.Hash()) } err = c.compareNewHeaderWithWitnesses(h) @@ -354,32 +375,32 @@ func (c *Client) initializeWithTrustOptions(options TrustOptions) error { return err } - // 2) Fetch and verify the vals. - vals, err := c.validatorSetFromPrimary(options.Height) + // 2) Fetch and verify the voters. 
+ voters, err := c.voterSetFromPrimary(options.Height) if err != nil { return err } - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return errors.Errorf("expected header's validators (%X) to match those that were supplied (%X)", - h.ValidatorsHash, - vals.Hash(), + if !bytes.Equal(h.VotersHash, voters.Hash()) { + return fmt.Errorf("expected header's voters (%X) to match those that were supplied (%X)", + h.VotersHash, + voters.Hash(), ) } - // Ensure that +2/3 of validators signed correctly. - err = vals.VerifyCommit(c.chainID, h.Commit.BlockID, h.Height, h.Commit) + // Ensure that +2/3 of voters signed correctly. + err = voters.VerifyCommit(c.chainID, h.Commit.BlockID, h.Height, h.Commit) if err != nil { - return errors.Wrap(err, "invalid commit") + return fmt.Errorf("invalid commit: %w", err) } // 3) Persist both of them and continue. - return c.updateTrustedHeaderAndVals(h, vals) + return c.updateTrustedHeaderAndVals(h, voters) } // TrustedHeader returns a trusted header at the given height (0 - the latest). // -// Headers along with validator sets, which can't be trusted anymore, are +// Headers along with voter sets, which can't be trusted anymore, are // removed once a day (can be changed with RemoveNoLongerTrustedHeadersPeriod // option). // . @@ -400,13 +421,13 @@ func (c *Client) TrustedHeader(height int64) (*types.SignedHeader, error) { return c.trustedStore.SignedHeader(height) } -// TrustedValidatorSet returns a trusted validator set at the given height (0 - +// TrustedVoterSet returns a trusted voter set at the given height (0 - // latest). The second return parameter is the height used (useful if 0 was // passed; otherwise can be ignored). // // height must be >= 0. // -// Headers along with validator sets are +// Headers along with voter sets are // removed once a day (can be changed with RemoveNoLongerTrustedHeadersPeriod // option). 
// @@ -414,15 +435,15 @@ func (c *Client) TrustedHeader(height int64) (*types.SignedHeader, error) { // - there are some issues with the trusted store, although that should not // happen normally; // - negative height is passed; -// - header signed by that validator set has not been verified yet +// - header signed by that voter set has not been verified yet // // Safe for concurrent use by multiple goroutines. -func (c *Client) TrustedValidatorSet(height int64) (valSet *types.ValidatorSet, heightUsed int64, err error) { +func (c *Client) TrustedVoterSet(height int64) (valSet *types.VoterSet, heightUsed int64, err error) { heightUsed, err = c.compareWithLatestHeight(height) if err != nil { return nil, heightUsed, err } - valSet, err = c.trustedStore.ValidatorSet(heightUsed) + valSet, err = c.trustedStore.VoterSet(heightUsed) if err != nil { return nil, heightUsed, err } @@ -432,7 +453,7 @@ func (c *Client) TrustedValidatorSet(height int64) (valSet *types.ValidatorSet, func (c *Client) compareWithLatestHeight(height int64) (int64, error) { latestHeight, err := c.LastTrustedHeight() if err != nil { - return 0, errors.Wrap(err, "can't get last trusted height") + return 0, fmt.Errorf("can't get last trusted height: %w", err) } if latestHeight == -1 { return 0, errors.New("no headers exist") @@ -440,7 +461,7 @@ func (c *Client) compareWithLatestHeight(height int64) (int64, error) { switch { case height > latestHeight: - return 0, errors.Errorf("unverified header/valset requested (latest: %d)", latestHeight) + return 0, fmt.Errorf("unverified header/valset requested (latest: %d)", latestHeight) case height == 0: return latestHeight, nil case height < 0: @@ -450,30 +471,7 @@ func (c *Client) compareWithLatestHeight(height int64) (int64, error) { return height, nil } -// LastTrustedHeight returns a last trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. 
-func (c *Client) LastTrustedHeight() (int64, error) { - return c.trustedStore.LastSignedHeaderHeight() -} - -// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if -// there are no trusted headers. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) FirstTrustedHeight() (int64, error) { - return c.trustedStore.FirstSignedHeaderHeight() -} - -// ChainID returns the chain ID the light client was configured with. -// -// Safe for concurrent use by multiple goroutines. -func (c *Client) ChainID() string { - return c.chainID -} - -// VerifyHeaderAtHeight fetches header and validators at the given height +// VerifyHeaderAtHeight fetches header and voters at the given height // and calls VerifyHeader. It returns header immediately if such exists in // trustedStore (no verification is needed). // @@ -505,27 +503,33 @@ func (c *Client) VerifyHeaderAtHeight(height int64, now time.Time) (*types.Signe // VerifyHeader verifies new header against the trusted state. It returns // immediately if newHeader exists in trustedStore (no verification is -// needed). +// needed). Else it performs one of the two types of verification: // -// SequentialVerification: verifies that 2/3 of the trusted validator set has +// SequentialVerification: verifies that 2/3 of the trusted voter set has // signed the new header. If the headers are not adjacent, **all** intermediate -// headers will be requested. +// headers will be requested. Intermediate headers are not saved to database. // // SkippingVerification(trustLevel): verifies that {trustLevel} of the trusted -// validator set has signed the new header. If it's not the case and the +// voter set has signed the new header. If it's not the case and the // headers are not adjacent, bisection is performed and necessary (not all) // intermediate headers will be requested. See the specification for details. +// Intermediate headers are not saved to database. 
// https://github.com/tendermint/spec/blob/master/spec/consensus/light-client.md // +// If the header, which is older than the currently trusted header, is +// requested and the light client does not have it, VerifyHeader will perform: +// a) bisection verification if nearest trusted header is found & not expired +// b) backwards verification in all other cases +// // It returns ErrOldHeaderExpired if the latest trusted header expired. // // If the primary provides an invalid header (ErrInvalidHeader), it is rejected // and replaced by another provider until all are exhausted. // -// If, at any moment, SignedHeader or ValidatorSet are not found by the primary +// If, at any moment, SignedHeader or VoterSet are not found by the primary // provider, provider.ErrSignedHeaderNotFound / // provider.ErrValidatorSetNotFound error is returned. -func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { +func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.VoterSet, now time.Time) error { if newHeader.Height <= 0 { return errors.New("negative or zero height") } @@ -535,7 +539,7 @@ func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.Vali if err == nil { // Make sure it's the same header. 
if !bytes.Equal(h.Hash(), newHeader.Hash()) { - return errors.Errorf("existing trusted header %X does not match newHeader %X", h.Hash(), newHeader.Hash()) + return fmt.Errorf("existing trusted header %X does not match newHeader %X", h.Hash(), newHeader.Hash()) } c.logger.Info("Header has already been verified", "height", newHeader.Height, "hash", hash2str(newHeader.Hash())) @@ -545,117 +549,87 @@ func (c *Client) VerifyHeader(newHeader *types.SignedHeader, newVals *types.Vali return c.verifyHeader(newHeader, newVals, now) } -func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.ValidatorSet, now time.Time) error { +func (c *Client) verifyHeader(newHeader *types.SignedHeader, newVals *types.VoterSet, now time.Time) error { c.logger.Info("VerifyHeader", "height", newHeader.Height, "hash", hash2str(newHeader.Hash()), "vals", hash2str(newVals.Hash())) var err error - // 1) If going forward, perform either bisection or sequential verification + // 1) If going forward, perform either bisection or sequential verification. if newHeader.Height >= c.latestTrustedHeader.Height { switch c.verificationMode { case sequential: err = c.sequence(c.latestTrustedHeader, newHeader, newVals, now) case skipping: - err = c.bisection(c.latestTrustedHeader, c.latestTrustedVals, newHeader, newVals, now) + err = c.bisection(c.latestTrustedHeader, c.latestTrustedVoters, newHeader, newVals, now) default: panic(fmt.Sprintf("Unknown verification mode: %b", c.verificationMode)) } } else { - // 2) Otherwise, perform backwards verification - // Find the closest trusted header after newHeader.Height - var closestHeader *types.SignedHeader - closestHeader, err = c.trustedStore.SignedHeaderAfter(newHeader.Height) + // 2) If verifying before the first trusted header, perform backwards + // verification. 
+ var ( + closestHeader *types.SignedHeader + firstHeaderHeight int64 + ) + firstHeaderHeight, err = c.FirstTrustedHeight() if err != nil { - return errors.Wrapf(err, "can't get signed header after height %d", newHeader.Height) + return fmt.Errorf("can't get first header height: %w", err) + } + if newHeader.Height < firstHeaderHeight { + closestHeader, err = c.TrustedHeader(firstHeaderHeight) + if err != nil { + return fmt.Errorf("can't get first signed header: %w", err) + } + if HeaderExpired(closestHeader, c.trustingPeriod, now) { + closestHeader = c.latestTrustedHeader + } + err = c.backwards(closestHeader, newHeader, now) + } else { + // 3) OR if between trusted headers where the nearest has not expired, + // perform bisection verification, else backwards. + closestHeader, err = c.trustedStore.SignedHeaderBefore(newHeader.Height) + if err != nil { + return fmt.Errorf("can't get signed header before height %d: %w", newHeader.Height, err) + } + var closestVotorSet *types.VoterSet + if c.verificationMode == sequential || HeaderExpired(closestHeader, c.trustingPeriod, now) { + err = c.backwards(c.latestTrustedHeader, newHeader, now) + } else { + closestVotorSet, _, err = c.TrustedVoterSet(closestHeader.Height) + if err != nil { + return fmt.Errorf("can't get voter set at height %d: %w", closestHeader.Height, err) + } + err = c.bisection(closestHeader, closestVotorSet, newHeader, newVals, now) + } } - - err = c.backwards(closestHeader, newHeader, now) } if err != nil { c.logger.Error("Can't verify", "err", err) return err } - + // 4) Compare header with other witnesses if err := c.compareNewHeaderWithWitnesses(newHeader); err != nil { c.logger.Error("Error when comparing new header with witnesses", "err", err) return err } + // 5) Once verified, save and return return c.updateTrustedHeaderAndVals(newHeader, newVals) } -// Primary returns the primary provider. -// -// NOTE: provider may be not safe for concurrent access. 
-func (c *Client) Primary() provider.Provider { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - return c.primary -} - -// Witnesses returns the witness providers. -// -// NOTE: providers may be not safe for concurrent access. -func (c *Client) Witnesses() []provider.Provider { - c.providerMutex.Lock() - defer c.providerMutex.Unlock() - return c.witnesses -} - -// Cleanup removes all the data (headers and validator sets) stored. Note: the -// client must be stopped at this point. -func (c *Client) Cleanup() error { - c.logger.Info("Removing all the data") - c.latestTrustedHeader = nil - c.latestTrustedVals = nil - return c.trustedStore.Prune(0) -} - -// cleanupAfter deletes all headers & validator sets after +height+. It also -// resets latestTrustedHeader to the latest header. -func (c *Client) cleanupAfter(height int64) error { - nextHeight := height - - for { - h, err := c.trustedStore.SignedHeaderAfter(nextHeight) - if err == store.ErrSignedHeaderNotFound { - break - } else if err != nil { - return errors.Wrapf(err, "failed to get header after %d", nextHeight) - } - - err = c.trustedStore.DeleteSignedHeaderAndValidatorSet(h.Height) - if err != nil { - c.logger.Error("can't remove a trusted header & validator set", "err", err, - "height", h.Height) - } - - nextHeight = h.Height - } - - c.latestTrustedHeader = nil - c.latestTrustedVals = nil - err := c.restoreTrustedHeaderAndVals() - if err != nil { - return err - } - - return nil -} - // see VerifyHeader func (c *Client) sequence( initiallyTrustedHeader *types.SignedHeader, newHeader *types.SignedHeader, - newVals *types.ValidatorSet, + newVals *types.VoterSet, now time.Time) error { var ( trustedHeader = initiallyTrustedHeader interimHeader *types.SignedHeader - interimVals *types.ValidatorSet + interimVals *types.VoterSet err error ) @@ -667,7 +641,7 @@ func (c *Client) sequence( } else { // intermediate headers interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(height) if err != nil { - 
return errors.Wrapf(err, "failed to obtain the header #%d", height) + return err } } @@ -679,12 +653,12 @@ func (c *Client) sequence( "newHash", hash2str(interimHeader.Hash())) err = VerifyAdjacent(c.chainID, trustedHeader, interimHeader, interimVals, - c.trustingPeriod, now) + c.trustingPeriod, now, c.maxClockDrift) if err != nil { - err = errors.Wrapf(err, "verify adjacent from #%d to #%d failed", - trustedHeader.Height, interimHeader.Height) + err = fmt.Errorf("verify adjacent from #%d to #%d failed: %w", + trustedHeader.Height, interimHeader.Height, err) - switch errors.Cause(err).(type) { + switch errors.Unwrap(err).(type) { case ErrInvalidHeader: c.logger.Error("primary sent invalid header -> replacing", "err", err) replaceErr := c.replacePrimaryProvider() @@ -708,47 +682,64 @@ func (c *Client) sequence( } // see VerifyHeader +// Bisection finds the middle header between a trusted and new header, reiterating the action until it +// verifies a header. A cache of headers requested by the primary is kept such that when a +// verification is made, and the light client tries again to verify the new header in the middle, +// the light client does not need to ask for all the same headers again. 
func (c *Client) bisection( initiallyTrustedHeader *types.SignedHeader, - initiallyTrustedVals *types.ValidatorSet, + initiallyTrustedVals *types.VoterSet, newHeader *types.SignedHeader, - newVals *types.ValidatorSet, + newVals *types.VoterSet, now time.Time) error { + type headerSet struct { + sh *types.SignedHeader + valSet *types.VoterSet + } + var ( + headerCache = []headerSet{{newHeader, newVals}} + depth = 0 + trustedHeader = initiallyTrustedHeader trustedVals = initiallyTrustedVals - - interimHeader = newHeader - interimVals = newVals ) for { c.logger.Debug("Verify newHeader against trustedHeader", "trustedHeight", trustedHeader.Height, "trustedHash", hash2str(trustedHeader.Hash()), - "newHeight", interimHeader.Height, - "newHash", hash2str(interimHeader.Hash())) + "newHeight", headerCache[depth].sh.Height, + "newHash", hash2str(headerCache[depth].sh.Hash())) - err := Verify(c.chainID, trustedHeader, trustedVals, interimHeader, interimVals, c.trustingPeriod, now, - c.trustLevel) + err := Verify(c.chainID, trustedHeader, trustedVals, headerCache[depth].sh, headerCache[depth].valSet, + c.trustingPeriod, now, c.maxClockDrift, c.trustLevel) switch err.(type) { case nil: - if interimHeader.Height == newHeader.Height { + // Have we verified the last header + if depth == 0 { return nil } - - // Update the lower bound to the previous upper bound - trustedHeader, trustedVals = interimHeader, interimVals - // Update the upper bound to the untrustedHeader - interimHeader, interimVals = newHeader, newVals + // If not, update the lower bound to the previous upper bound + trustedHeader, trustedVals = headerCache[depth].sh, headerCache[depth].valSet + // Remove the untrusted header at the lower bound in the header cache - it's no longer useful + headerCache = headerCache[:depth] + // Reset the cache depth so that we start from the upper bound again + depth = 0 case ErrNewValSetCantBeTrusted: - pivotHeight := (interimHeader.Height + trustedHeader.Height) / 2 - 
interimHeader, interimVals, err = c.fetchHeaderAndValsAtHeight(pivotHeight) - if err != nil { - return err + // do add another header to the end of the cache + if depth == len(headerCache)-1 { + pivotHeight := (headerCache[depth].sh.Height + trustedHeader. + Height) * bisectionNumerator / bisectionDenominator + interimHeader, interimVals, err := c.fetchHeaderAndValsAtHeight(pivotHeight) + if err != nil { + return err + } + headerCache = append(headerCache, headerSet{interimHeader, interimVals}) } + depth++ case ErrInvalidHeader: c.logger.Error("primary sent invalid header -> replacing", "err", err) @@ -756,52 +747,134 @@ func (c *Client) bisection( if replaceErr != nil { c.logger.Error("Can't replace primary", "err", replaceErr) // return original error - return errors.Wrapf(err, "verify from #%d to #%d failed", - trustedHeader.Height, interimHeader.Height) + return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", + trustedHeader.Height, headerCache[depth].sh.Height, err) } // attempt to verify the header again continue default: - return errors.Wrapf(err, "verify from #%d to #%d failed", - trustedHeader.Height, interimHeader.Height) + return fmt.Errorf("verify non adjacent from #%d to #%d failed: %w", + trustedHeader.Height, headerCache[depth].sh.Height, err) + } + } +} + +// LastTrustedHeight returns a last trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) LastTrustedHeight() (int64, error) { + return c.trustedStore.LastSignedHeaderHeight() +} + +// FirstTrustedHeight returns a first trusted height. -1 and nil are returned if +// there are no trusted headers. +// +// Safe for concurrent use by multiple goroutines. +func (c *Client) FirstTrustedHeight() (int64, error) { + return c.trustedStore.FirstSignedHeaderHeight() +} + +// ChainID returns the chain ID the light client was configured with. +// +// Safe for concurrent use by multiple goroutines. 
+func (c *Client) ChainID() string { + return c.chainID +} + +// Primary returns the primary provider. +// +// NOTE: provider may be not safe for concurrent access. +func (c *Client) Primary() provider.Provider { + c.providerMutex.Lock() + defer c.providerMutex.Unlock() + return c.primary +} + +// Witnesses returns the witness providers. +// +// NOTE: providers may be not safe for concurrent access. +func (c *Client) Witnesses() []provider.Provider { + c.providerMutex.Lock() + defer c.providerMutex.Unlock() + return c.witnesses +} + +// Cleanup removes all the data (headers and voter sets) stored. Note: the +// client must be stopped at this point. +func (c *Client) Cleanup() error { + c.logger.Info("Removing all the data") + c.latestTrustedHeader = nil + c.latestTrustedVoters = nil + return c.trustedStore.Prune(0) +} + +// cleanupAfter deletes all headers & voter sets after +height+. It also +// resets latestTrustedHeader to the latest header. +func (c *Client) cleanupAfter(height int64) error { + prevHeight := c.latestTrustedHeader.Height + + for { + h, err := c.trustedStore.SignedHeaderBefore(prevHeight) + if err == store.ErrSignedHeaderNotFound || (h != nil && h.Height <= height) { + break + } else if err != nil { + return fmt.Errorf("failed to get header before %d: %w", prevHeight, err) + } + + err = c.trustedStore.DeleteSignedHeaderAndValidatorSet(h.Height) + if err != nil { + c.logger.Error("can't remove a trusted header & voter set", "err", err, + "height", h.Height) } + + prevHeight = h.Height + } + + c.latestTrustedHeader = nil + c.latestTrustedVoters = nil + err := c.restoreTrustedHeaderAndVals() + if err != nil { + return err } + + return nil } -func (c *Client) updateTrustedHeaderAndVals(h *types.SignedHeader, vals *types.ValidatorSet) error { - if !bytes.Equal(h.ValidatorsHash, vals.Hash()) { - return errors.Errorf("expected validator's hash %X, but got %X", h.ValidatorsHash, vals.Hash()) +func (c *Client) updateTrustedHeaderAndVals(h 
*types.SignedHeader, voters *types.VoterSet) error { + if !bytes.Equal(h.VotersHash, voters.Hash()) { + return fmt.Errorf("expected voter's hash %X, but got %X", h.VotersHash, voters.Hash()) } - if err := c.trustedStore.SaveSignedHeaderAndValidatorSet(h, vals); err != nil { - return errors.Wrap(err, "failed to save trusted header") + if err := c.trustedStore.SaveSignedHeaderAndValidatorSet(h, voters); err != nil { + return fmt.Errorf("failed to save trusted header: %w", err) } if c.pruningSize > 0 { if err := c.trustedStore.Prune(c.pruningSize); err != nil { - return errors.Wrap(err, "prune") + return fmt.Errorf("prune: %w", err) } } if c.latestTrustedHeader == nil || h.Height > c.latestTrustedHeader.Height { c.latestTrustedHeader = h - c.latestTrustedVals = vals + c.latestTrustedVoters = voters } return nil } -// fetch header and validators for the given height (0 - latest) from primary +// fetch header and voters for the given height (0 - latest) from primary // provider. -func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.ValidatorSet, error) { +func (c *Client) fetchHeaderAndValsAtHeight(height int64) (*types.SignedHeader, *types.VoterSet, error) { h, err := c.signedHeaderFromPrimary(height) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to obtain the header #%d", height) + return nil, nil, fmt.Errorf("failed to obtain the header #%d: %w", height, err) } - vals, err := c.validatorSetFromPrimary(height) + vals, err := c.voterSetFromPrimary(height) if err != nil { - return nil, nil, errors.Wrapf(err, "failed to obtain the vals #%d", height) + return nil, nil, fmt.Errorf("failed to obtain the vals #%d: %w", height, err) } return h, vals, nil } @@ -815,6 +888,7 @@ func (c *Client) backwards( now time.Time) error { if HeaderExpired(initiallyTrustedHeader, c.trustingPeriod, now) { + c.logger.Error("Header Expired") return ErrOldHeaderExpired{initiallyTrustedHeader.Time.Add(c.trustingPeriod), now} } @@ -827,16 +901,20 
@@ func (c *Client) backwards( for trustedHeader.Height > newHeader.Height { interimHeader, err = c.signedHeaderFromPrimary(trustedHeader.Height - 1) if err != nil { - return errors.Wrapf(err, "failed to obtain the header at height #%d", trustedHeader.Height-1) + return fmt.Errorf("failed to obtain the header at height #%d: %w", trustedHeader.Height-1, err) } - + c.logger.Debug("Verify newHeader against trustedHeader", + "trustedHeight", trustedHeader.Height, + "trustedHash", hash2str(trustedHeader.Hash()), + "newHeight", interimHeader.Height, + "newHash", hash2str(interimHeader.Hash())) if err := VerifyBackwards(c.chainID, interimHeader, trustedHeader); err != nil { c.logger.Error("primary sent invalid header -> replacing", "err", err) if replaceErr := c.replacePrimaryProvider(); replaceErr != nil { c.logger.Error("Can't replace primary", "err", replaceErr) // return original error - return errors.Wrapf(err, "verify backwards from %d to %d failed", - trustedHeader.Height, interimHeader.Height) + return fmt.Errorf("verify backwards from %d to %d failed: %w", + trustedHeader.Height, interimHeader.Height, err) } } @@ -861,7 +939,7 @@ func (c *Client) compareNewHeaderWithWitnesses(h *types.SignedHeader) error { witnessesToRemove := make([]int, 0) for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { if len(c.witnesses) == 0 { - return errors.New("could not find any witnesses. 
please reset the light client") + return errNoWitnesses{} } for i, witness := range c.witnesses { @@ -878,7 +956,7 @@ func (c *Client) compareNewHeaderWithWitnesses(h *types.SignedHeader) error { } if !bytes.Equal(h.Hash(), altH.Hash()) { - if err = c.latestTrustedVals.VerifyCommitTrusting(c.chainID, altH.Commit.BlockID, + if err = c.latestTrustedVoters.VerifyCommitTrusting(c.chainID, altH.Commit.BlockID, altH.Height, altH.Commit, c.trustLevel); err != nil { c.logger.Error("Witness sent us incorrect header", "err", err, "witness", witness) witnessesToRemove = append(witnessesToRemove, i) @@ -887,7 +965,7 @@ func (c *Client) compareNewHeaderWithWitnesses(h *types.SignedHeader) error { // TODO: send the diverged headers to primary && all witnesses - return errors.Errorf( + return fmt.Errorf( "header hash %X does not match one %X from the witness %v", h.Hash(), altH.Hash(), witness) } @@ -930,7 +1008,7 @@ func (c *Client) removeWitness(idx int) { func (c *Client) Update(now time.Time) (*types.SignedHeader, error) { lastTrustedHeight, err := c.LastTrustedHeight() if err != nil { - return nil, errors.Wrap(err, "can't get last trusted height") + return nil, fmt.Errorf("can't get last trusted height: %w", err) } if lastTrustedHeight == -1 { @@ -940,7 +1018,7 @@ func (c *Client) Update(now time.Time) (*types.SignedHeader, error) { latestHeader, latestVals, err := c.fetchHeaderAndValsAtHeight(0) if err != nil { - return nil, errors.Wrapf(err, "can't get latest header and vals") + return nil, err } if latestHeader.Height > lastTrustedHeight { @@ -962,7 +1040,7 @@ func (c *Client) replacePrimaryProvider() error { defer c.providerMutex.Unlock() if len(c.witnesses) <= 1 { - return errors.Errorf("only one witness left. 
please reset the light client") + return errNoWitnesses{} } c.primary = c.witnesses[0] c.witnesses = c.witnesses[1:] @@ -982,7 +1060,7 @@ func (c *Client) signedHeaderFromPrimary(height int64) (*types.SignedHeader, err if err == nil { // sanity check if height > 0 && h.Height != height { - return nil, errors.Errorf("expected %d height, got %d", height, h.Height) + return nil, fmt.Errorf("expected %d height, got %d", height, h.Height) } return h, nil } @@ -1002,18 +1080,18 @@ func (c *Client) signedHeaderFromPrimary(height int64) (*types.SignedHeader, err return c.signedHeaderFromPrimary(height) } -// validatorSetFromPrimary retrieves the ValidatorSet from the primary provider +// voterSetFromPrimary retrieves the VoterSet from the primary provider // at the specified height. Handles dropout by the primary provider after 5 // attempts by replacing it with an alternative provider. -func (c *Client) validatorSetFromPrimary(height int64) (*types.ValidatorSet, error) { +func (c *Client) voterSetFromPrimary(height int64) (*types.VoterSet, error) { for attempt := uint16(1); attempt <= c.maxRetryAttempts; attempt++ { c.providerMutex.Lock() - vals, err := c.primary.ValidatorSet(height) + voters, err := c.primary.VoterSet(height) c.providerMutex.Unlock() if err == nil || err == provider.ErrValidatorSetNotFound { - return vals, err + return voters, err } - c.logger.Error("Failed to get validator set from primary", "attempt", attempt, "err", err) + c.logger.Error("Failed to get voter set from primary", "attempt", attempt, "err", err) time.Sleep(backoffTimeout(attempt)) } @@ -1023,7 +1101,7 @@ func (c *Client) validatorSetFromPrimary(height int64) (*types.ValidatorSet, err return nil, err } - return c.validatorSetFromPrimary(height) + return c.voterSetFromPrimary(height) } // exponential backoff (with jitter) diff --git a/lite2/client_benchmark_test.go b/lite2/client_benchmark_test.go new file mode 100644 index 000000000..5877dbc3c --- /dev/null +++ 
b/lite2/client_benchmark_test.go @@ -0,0 +1,106 @@ +package lite_test + +import ( + "testing" + "time" + + dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" + lite "github.com/tendermint/tendermint/lite2" + "github.com/tendermint/tendermint/lite2/provider" + mockp "github.com/tendermint/tendermint/lite2/provider/mock" + dbs "github.com/tendermint/tendermint/lite2/store/db" +) + +// NOTE: block is produced every minute. Make sure the verification time +// provided in the function call is correct for the size of the blockchain. The +// benchmarking may take some time hence it can be more useful to set the time +// or the amount of iterations use the flag -benchtime t -> i.e. -benchtime 5m +// or -benchtime 100x. +// +// Remember that none of these benchmarks account for network latency. +var ( + benchmarkFullNode = mockp.New(GenMockNode(chainID, 1000, 100, 1, bTime)) + genesisHeader, _ = benchmarkFullNode.SignedHeader(1) +) + +func BenchmarkSequence(b *testing.B) { + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: 1, + Hash: genesisHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + lite.SequentialVerification(), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1000, bTime.Add(1000*time.Minute)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBisection(b *testing.B) { + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: 1, + Hash: genesisHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1000, 
bTime.Add(1000*time.Minute)) + if err != nil { + b.Fatal(err) + } + } +} + +func BenchmarkBackwards(b *testing.B) { + trustedHeader, _ := benchmarkFullNode.SignedHeader(0) + c, err := lite.NewClient( + chainID, + lite.TrustOptions{ + Period: 24 * time.Hour, + Height: trustedHeader.Height, + Hash: trustedHeader.Hash(), + }, + benchmarkFullNode, + []provider.Provider{benchmarkFullNode}, + dbs.New(dbm.NewMemDB(), chainID), + lite.Logger(log.TestingLogger()), + ) + if err != nil { + b.Fatal(err) + } + b.ResetTimer() + + for n := 0; n < b.N; n++ { + _, err = c.VerifyHeaderAtHeight(1, bTime) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/lite2/client_test.go b/lite2/client_test.go index 65ea55122..9cb99eb74 100644 --- a/lite2/client_test.go +++ b/lite2/client_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "sync" @@ -11,6 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/libs/log" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/lite2/provider" mockp "github.com/tendermint/tendermint/lite2/provider/mock" dbs "github.com/tendermint/tendermint/lite2/store/db" @@ -23,7 +24,7 @@ const ( var ( keys = genPrivKeys(4) - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10).Validators) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") h1 = keys.GenSignedHeader(chainID, 1, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) @@ -34,12 +35,12 @@ var ( h3 = keys.GenSignedHeaderLastBlockID(chainID, 3, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys), types.BlockID{Hash: h2.Hash()}) trustPeriod = 4 * time.Hour - trustOptions = TrustOptions{ + trustOptions = lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: h1.Hash(), } - valSet = map[int64]*types.ValidatorSet{ + valSet = map[int64]*types.VoterSet{ 1: 
vals, 2: vals, 3: vals, @@ -57,17 +58,18 @@ var ( headerSet, valSet, ) - deadNode = mockp.NewDeadMock(chainID) + deadNode = mockp.NewDeadMock(chainID) + largeFullNode = mockp.New(GenMockNode(chainID, 10, 3, 0, bTime)) ) func TestClient_SequentialVerification(t *testing.T) { newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) + newVals := types.ToVoterAll(newKeys.ToValidators(10, 1).Validators) testCases := []struct { name string otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet initErr bool verifyErr bool }{ @@ -85,7 +87,7 @@ func TestClient_SequentialVerification(t *testing.T) { 1: keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, }, true, @@ -126,7 +128,7 @@ func TestClient_SequentialVerification(t *testing.T) { { "bad: different validator set at height 3", headerSet, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -139,7 +141,7 @@ func TestClient_SequentialVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, mockp.New( @@ -153,7 +155,7 @@ func TestClient_SequentialVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - SequentialVerification(), + lite.SequentialVerification(), ) if tc.initErr { @@ -176,16 +178,16 @@ func TestClient_SequentialVerification(t *testing.T) { func TestClient_SkippingVerification(t *testing.T) { // required for 2nd test case newKeys := genPrivKeys(4) - newVals := newKeys.ToValidators(10, 1) + newVals := types.ToVoterAll(newKeys.ToValidators(10, 1).Validators) // 1/3+ of vals, 2/3- of newVals transitKeys := keys.Extend(3) - transitVals := transitKeys.ToValidators(10, 1) + 
transitVals := types.ToVoterAll(transitKeys.ToValidators(10, 1).Validators) testCases := []struct { name string otherHeaders map[int64]*types.SignedHeader // all except ^ - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet initErr bool verifyErr bool }{ @@ -209,7 +211,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: transitKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, transitVals, transitVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(transitKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: transitVals, @@ -229,7 +231,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -249,7 +251,7 @@ func TestClient_SkippingVerification(t *testing.T) { 3: newKeys.GenSignedHeader(chainID, 3, bTime.Add(2*time.Hour), nil, newVals, newVals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(newKeys)), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, 3: newVals, @@ -262,7 +264,7 @@ func TestClient_SkippingVerification(t *testing.T) { for _, tc := range testCases { tc := tc t.Run(tc.name, func(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, mockp.New( @@ -276,7 +278,7 @@ func TestClient_SkippingVerification(t *testing.T) { tc.vals, )}, dbs.New(dbm.NewMemDB(), chainID), - SkippingVerification(DefaultTrustLevel), + lite.SkippingVerification(lite.DefaultTrustLevel), ) if tc.initErr { require.Error(t, err) @@ -296,13 +298,13 @@ func TestClient_SkippingVerification(t *testing.T) { } func TestClient_Cleanup(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, 
[]provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) _, err = c.TrustedHeader(1) @@ -316,7 +318,7 @@ func TestClient_Cleanup(t *testing.T) { assert.Error(t, err) assert.Nil(t, h) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.Error(t, err) assert.Nil(t, valSet) } @@ -329,13 +331,13 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -344,11 +346,11 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { assert.NotNil(t, h) assert.Equal(t, h.Hash(), h1.Hash()) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -371,9 +373,9 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: header1.Hash(), @@ -381,7 +383,7 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -391,11 +393,11 @@ func TestClientRestoresTrustedHeaderAfterStartup1(t *testing.T) { assert.Equal(t, h.Hash(), header1.Hash()) } - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if 
assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } } @@ -408,9 +410,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { err := trustedStore.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: h2.Hash(), @@ -418,7 +420,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -428,11 +430,11 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { assert.NotNil(t, h) assert.Equal(t, h.Hash(), h1.Hash()) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } @@ -459,9 +461,9 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 2, Hash: diffHeader2.Hash(), @@ -469,7 +471,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -478,7 +480,7 @@ func TestClientRestoresTrustedHeaderAfterStartup2(t *testing.T) { assert.Error(t, err) assert.Nil(t, h) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.Error(t, err) assert.Nil(t, valSet) } @@ -497,13 +499,13 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { err = 
trustedStore.SaveSignedHeaderAndValidatorSet(h2, vals) require.NoError(t, err) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -513,11 +515,11 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.NotNil(t, h) assert.Equal(t, h.Hash(), h1.Hash()) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } // Check we no longer have 2nd header (+header2+). @@ -525,7 +527,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.Error(t, err) assert.Nil(t, h) - valSet, _, err = c.TrustedValidatorSet(2) + valSet, _, err = c.TrustedVoterSet(2) assert.Error(t, err) assert.Nil(t, valSet) } @@ -554,9 +556,9 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { valSet, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 4 * time.Hour, Height: 1, Hash: header1.Hash(), @@ -564,7 +566,7 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { primary, []provider.Provider{primary}, trustedStore, - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -574,11 +576,11 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.NotNil(t, h) assert.Equal(t, h.Hash(), header1.Hash()) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } // Check we no longer have invalid 2nd header (+header2+). 
@@ -586,20 +588,20 @@ func TestClientRestoresTrustedHeaderAfterStartup3(t *testing.T) { assert.Error(t, err) assert.Nil(t, h) - valSet, _, err = c.TrustedValidatorSet(2) + valSet, _, err = c.TrustedVoterSet(2) assert.Error(t, err) assert.Nil(t, valSet) } } func TestClient_Update(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -610,21 +612,21 @@ func TestClient_Update(t *testing.T) { assert.EqualValues(t, 3, h.Height) } - valSet, _, err := c.TrustedValidatorSet(3) + valSet, _, err := c.TrustedVoterSet(3) assert.NoError(t, err) if assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } func TestClient_Concurrency(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -652,7 +654,7 @@ func TestClient_Concurrency(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, h) - vals, _, err := c.TrustedValidatorSet(2) + vals, _, err := c.TrustedVoterSet(2) assert.NoError(t, err) assert.NotNil(t, vals) }() @@ -662,14 +664,14 @@ func TestClient_Concurrency(t *testing.T) { } func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, deadNode, []provider.Provider{fullNode, fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) require.NoError(t, err) @@ -682,62 +684,65 @@ func TestClientReplacesPrimaryWithWitnessIfPrimaryIsUnavailable(t *testing.T) { func 
TestClient_BackwardsVerification(t *testing.T) { { - c, err := NewClient( + trustHeader, _ := largeFullNode.SignedHeader(6) + c, err := lite.NewClient( chainID, - TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), + lite.TrustOptions{ + Period: 4 * time.Minute, + Height: trustHeader.Height, + Hash: trustHeader.Hash(), }, - fullNode, - []provider.Provider{fullNode}, + largeFullNode, + []provider.Provider{largeFullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) - // 1) header is missing => expect no error - h, err := c.VerifyHeaderAtHeight(2, bTime.Add(1*time.Hour).Add(1*time.Second)) + // 1) verify before the trusted header using backwards => expect no error + h, err := c.VerifyHeaderAtHeight(5, bTime.Add(6*time.Minute)) require.NoError(t, err) if assert.NotNil(t, h) { - assert.EqualValues(t, 2, h.Height) + assert.EqualValues(t, 5, h.Height) } // 2) untrusted header is expired but trusted header is not => expect no error - h, err = c.VerifyHeaderAtHeight(1, bTime.Add(1*time.Hour).Add(1*time.Second)) + h, err = c.VerifyHeaderAtHeight(3, bTime.Add(8*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) // 3) already stored headers should return the header without error - h, err = c.VerifyHeaderAtHeight(2, bTime.Add(1*time.Hour).Add(1*time.Second)) + h, err = c.VerifyHeaderAtHeight(5, bTime.Add(6*time.Minute)) assert.NoError(t, err) assert.NotNil(t, h) - } - { - c, err := NewClient( - chainID, - TrustOptions{ - Period: 1 * time.Hour, - Height: 3, - Hash: h3.Hash(), - }, - fullNode, - []provider.Provider{fullNode}, - dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), - ) + + // 4a) First verify latest header + _, err = c.VerifyHeaderAtHeight(9, bTime.Add(9*time.Minute)) require.NoError(t, err) - // 3) trusted header has expired => expect error - _, err = c.VerifyHeaderAtHeight(1, bTime.Add(4*time.Hour).Add(1*time.Second)) + // 4b) Verify backwards 
using bisection => expect no error + _, err = c.VerifyHeaderAtHeight(7, bTime.Add(10*time.Minute)) + assert.NoError(t, err) + // shouldn't have verified this header in the process + _, err = c.TrustedHeader(8) + assert.Error(t, err) + + // 5) trusted header has expired => expect error + _, err = c.VerifyHeaderAtHeight(1, bTime.Add(20*time.Minute)) assert.Error(t, err) + + // 6) Try bisection method, but closest header (at 7) has expired + // so change to backwards => expect no error + _, err = c.VerifyHeaderAtHeight(8, bTime.Add(12*time.Minute)) + assert.NoError(t, err) + } { testCases := []struct { provider provider.Provider }{ { - // provides incorrect height + // 7) provides incorrect height mockp.New( chainID, map[int64]*types.SignedHeader{ @@ -750,7 +755,7 @@ func TestClient_BackwardsVerification(t *testing.T) { ), }, { - // provides incorrect hash + // 8) provides incorrect hash mockp.New( chainID, map[int64]*types.SignedHeader{ @@ -765,9 +770,9 @@ func TestClient_BackwardsVerification(t *testing.T) { } for _, tc := range testCases { - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 1 * time.Hour, Height: 3, Hash: h3.Hash(), @@ -775,7 +780,7 @@ func TestClient_BackwardsVerification(t *testing.T) { tc.provider, []provider.Provider{tc.provider}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -791,7 +796,7 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { err := db.SaveSignedHeaderAndValidatorSet(h1, vals) require.NoError(t, err) - c, err := NewClientFromTrustedStore( + c, err := lite.NewClientFromTrustedStore( chainID, trustPeriod, deadNode, @@ -806,23 +811,23 @@ func TestClient_NewClientFromTrustedStore(t *testing.T) { assert.NoError(t, err) assert.EqualValues(t, 1, h.Height) - valSet, _, err := c.TrustedValidatorSet(1) + valSet, _, err := c.TrustedVoterSet(1) assert.NoError(t, err) assert.NotNil(t, valSet) if 
assert.NotNil(t, valSet) { - assert.Equal(t, h.ValidatorsHash.Bytes(), valSet.Hash()) + assert.Equal(t, h.VotersHash.Bytes(), valSet.Hash()) } } func TestNewClientErrorsIfAllWitnessesUnavailable(t *testing.T) { - _, err := NewClient( + _, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{deadNode, deadNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) if assert.Error(t, err) { assert.Contains(t, err.Error(), "awaiting response from all witnesses exceeded dropout time") @@ -839,7 +844,7 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { []byte("app_hash2"), []byte("cons_hash"), []byte("results_hash"), len(keys), len(keys), types.BlockID{Hash: h1.Hash()}), }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, }, @@ -852,20 +857,20 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { 2: h2, 3: {Header: nil, Commit: nil}, }, - map[int64]*types.ValidatorSet{ + map[int64]*types.VoterSet{ 1: vals, 2: vals, }, ) - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{badProvider1, badProvider2}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), - MaxRetryAttempts(1), + lite.Logger(log.TestingLogger()), + lite.MaxRetryAttempts(1), ) // witness should have behaved properly -> no error require.NoError(t, err) @@ -885,13 +890,13 @@ func TestClientRemovesWitnessIfItSendsUsIncorrectHeader(t *testing.T) { } func TestClientTrustedValidatorSet(t *testing.T) { - c, err := NewClient( + c, err := lite.NewClient( chainID, trustOptions, fullNode, []provider.Provider{fullNode}, dbs.New(dbm.NewMemDB(), chainID), - Logger(log.TestingLogger()), + lite.Logger(log.TestingLogger()), ) require.NoError(t, err) @@ -899,7 +904,7 @@ func TestClientTrustedValidatorSet(t *testing.T) { _, err = c.VerifyHeaderAtHeight(2, 
bTime.Add(2*time.Hour).Add(1*time.Second)) require.NoError(t, err) - valSet, height, err := c.TrustedValidatorSet(0) + valSet, height, err := c.TrustedVoterSet(0) assert.NoError(t, err) assert.NotNil(t, valSet) assert.EqualValues(t, 2, height) diff --git a/lite2/doc.go b/lite2/doc.go index b61f5453f..f42aa64f1 100644 --- a/lite2/doc.go +++ b/lite2/doc.go @@ -97,6 +97,18 @@ Verify function verifies a new header against some trusted header. See https://github.com/tendermint/spec/blob/master/spec/consensus/light-client/verification.md for details. +There are two methods of verification: sequential and bisection + +Sequential uses the headers hashes and the validator sets to verify each adjacent header until +it reaches the target header. + +Bisection finds the middle header between a trusted and new header, reiterating the action until it +verifies a header. A cache of headers requested by the primary is kept such that when a +verification is made, and the light client tries again to verify the new header in the middle, +the light client does not need to ask for all the same headers again. + +refer to docs/imgs/light_client_bisection_alg.png + ## 3. Secure RPC proxy Tendermint RPC exposes a lot of info, but a malicious node could return any @@ -108,5 +120,8 @@ some other node. See https://docs.tendermint.com/master/tendermint-core/light-client-protocol.html for usage example. +Or see +https://github.com/tendermint/spec/tree/master/spec/consensus/light-client +for the full spec */ package lite diff --git a/lite2/errors.go b/lite2/errors.go index 13a6cf29d..12e9d42c3 100644 --- a/lite2/errors.go +++ b/lite2/errors.go @@ -38,3 +38,11 @@ type ErrInvalidHeader struct { func (e ErrInvalidHeader) Error() string { return fmt.Sprintf("invalid header: %v", e.Reason) } + +// errNoWitnesses means that there are not enough witnesses connected to +// continue running the light client. 
+type errNoWitnesses struct{} + +func (e errNoWitnesses) Error() string { + return "no witnesses connected. please reset light client" +} diff --git a/lite2/example_test.go b/lite2/example_test.go index e8c3b8bb3..0de5f1349 100644 --- a/lite2/example_test.go +++ b/lite2/example_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "fmt" @@ -11,6 +11,7 @@ import ( dbm "github.com/tendermint/tm-db" "github.com/tendermint/tendermint/abci/example/kvstore" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/lite2/provider" httpp "github.com/tendermint/tendermint/lite2/provider/http" dbs "github.com/tendermint/tendermint/lite2/store/db" @@ -48,9 +49,9 @@ func ExampleClient_Update() { stdlog.Fatal(err) } - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, Hash: header.Hash(), @@ -117,9 +118,9 @@ func ExampleClient_VerifyHeaderAtHeight() { stdlog.Fatal(err) } - c, err := NewClient( + c, err := lite.NewClient( chainID, - TrustOptions{ + lite.TrustOptions{ Period: 504 * time.Hour, // 21 days Height: 2, Hash: header.Hash(), diff --git a/lite2/test_helpers.go b/lite2/helpers_test.go similarity index 63% rename from lite2/test_helpers.go rename to lite2/helpers_test.go index cc1bf4eb9..01cb743f6 100644 --- a/lite2/test_helpers.go +++ b/lite2/helpers_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "time" @@ -69,6 +69,14 @@ func (pkz privKeys) ToValidators(init, inc int64) *types.ValidatorSet { return types.NewValidatorSet(res) } +func (pkz privKeys) ToVoters(init, inc int64) *types.VoterSet { + res := make([]*types.Validator, len(pkz)) + for i, k := range pkz { + res[i] = types.NewValidator(k.PubKey(), init+int64(i)*inc) + } + return types.ToVoterAll(res) +} + // signHeader properly signs the header with all keys from first to last exclusive. 
func (pkz privKeys) signHeader(header *types.Header, first, last int) *types.Commit { commitSigs := make([]types.CommitSig, len(pkz)) @@ -120,7 +128,7 @@ func makeVote(header *types.Header, valset *types.ValidatorSet, } func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte) *types.Header { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte) *types.Header { return &types.Header{ ChainID: chainID, @@ -128,18 +136,18 @@ func genHeader(chainID string, height int64, bTime time.Time, txs types.Txs, Time: bTime, // LastBlockID // LastCommitHash - ValidatorsHash: valset.Hash(), - NextValidatorsHash: nextValset.Hash(), - DataHash: txs.Hash(), - AppHash: appHash, - ConsensusHash: consHash, - LastResultsHash: resHash, + VotersHash: valset.Hash(), + NextVotersHash: nextValset.Hash(), + DataHash: txs.Hash(), + AppHash: appHash, + ConsensusHash: consHash, + LastResultsHash: resHash, } } // GenSignedHeader calls genHeader and signHeader and combines them into a SignedHeader. func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) return &types.SignedHeader{ @@ -150,7 +158,7 @@ func (pkz privKeys) GenSignedHeader(chainID string, height int64, bTime time.Tim // GenSignedHeaderLastBlockID calls genHeader and signHeader and combines them into a SignedHeader. 
func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTime time.Time, txs types.Txs, - valset, nextValset *types.ValidatorSet, appHash, consHash, resHash []byte, first, last int, + valset, nextValset *types.VoterSet, appHash, consHash, resHash []byte, first, last int, lastBlockID types.BlockID) *types.SignedHeader { header := genHeader(chainID, height, bTime, txs, valset, nextValset, appHash, consHash, resHash) @@ -160,3 +168,61 @@ func (pkz privKeys) GenSignedHeaderLastBlockID(chainID string, height int64, bTi Commit: pkz.signHeader(header, first, last), } } + +func (pkz privKeys) ChangeKeys(delta int) privKeys { + newKeys := pkz[delta:] + return newKeys.Extend(delta) +} + +// Generates the header and validator set to create a full entire mock node with blocks to height ( +// blockSize) and with variation in validator sets. BlockIntervals are in per minute. +// NOTE: Expected to have a large validator set size ~ 100 validators. +func GenMockNode( + chainID string, + blockSize int64, + valSize int, + valVariation float32, + bTime time.Time) ( + string, + map[int64]*types.SignedHeader, + map[int64]*types.VoterSet) { + + var ( + headers = make(map[int64]*types.SignedHeader, blockSize) + voterSet = make(map[int64]*types.VoterSet, blockSize) + keys = genPrivKeys(valSize) + totalVariation = valVariation + valVariationInt int + newKeys privKeys + ) + + valVariationInt = int(totalVariation) + totalVariation = -float32(valVariationInt) + newKeys = keys.ChangeKeys(valVariationInt) + + // genesis header and vals + lastHeader := keys.GenSignedHeader(chainID, 1, bTime.Add(1*time.Minute), nil, + keys.ToVoters(2, 2), newKeys.ToVoters(2, 2), []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys)) + currentHeader := lastHeader + headers[1] = currentHeader + voterSet[1] = keys.ToVoters(2, 2) + keys = newKeys + + for height := int64(2); height <= blockSize; height++ { + totalVariation += valVariation + valVariationInt = 
int(totalVariation) + totalVariation = -float32(valVariationInt) + newKeys = keys.ChangeKeys(valVariationInt) + currentHeader = keys.GenSignedHeaderLastBlockID(chainID, height, bTime.Add(time.Duration(height)*time.Minute), + nil, + keys.ToVoters(2, 2), newKeys.ToVoters(2, 2), []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys), types.BlockID{Hash: lastHeader.Hash()}) + headers[height] = currentHeader + voterSet[height] = keys.ToVoters(2, 2) + lastHeader = currentHeader + keys = newKeys + } + + return chainID, headers, voterSet +} diff --git a/lite2/provider/http/http.go b/lite2/provider/http/http.go index 130bf0a24..20b26eedb 100644 --- a/lite2/provider/http/http.go +++ b/lite2/provider/http/http.go @@ -7,6 +7,7 @@ import ( "github.com/tendermint/tendermint/lite2/provider" rpcclient "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" "github.com/tendermint/tendermint/types" ) @@ -21,25 +22,31 @@ type SignStatusClient interface { // http provider uses an RPC client (or SignStatusClient more generally) to // obtain the necessary information. type http struct { - chainID string - client SignStatusClient + SignStatusClient // embed so interface can be converted to SignStatusClient for tests + chainID string } -// New creates a HTTP provider, which is using the rpcclient.HTTP -// client under the hood. +// New creates a HTTP provider, which is using the rpchttp.HTTP client under the +// hood. If no scheme is provided in the remote URL, http will be used by default. 
func New(chainID, remote string) (provider.Provider, error) { - httpClient, err := rpcclient.NewHTTP(remote, "/websocket") + // ensure URL scheme is set (default HTTP) when not provided + if !strings.Contains(remote, "://") { + remote = "http://" + remote + } + + httpClient, err := rpchttp.New(remote, "/websocket") if err != nil { return nil, err } + return NewWithClient(chainID, httpClient), nil } // NewWithClient allows you to provide custom SignStatusClient. func NewWithClient(chainID string, client SignStatusClient) provider.Provider { return &http{ - chainID: chainID, - client: client, + SignStatusClient: client, + chainID: chainID, } } @@ -49,7 +56,7 @@ func (p *http) ChainID() string { } func (p *http) String() string { - return fmt.Sprintf("http{%s}", p.client.Remote()) + return fmt.Sprintf("http{%s}", p.Remote()) } // SignedHeader fetches a SignedHeader at the given height and checks the @@ -60,7 +67,7 @@ func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, err } - commit, err := p.client.Commit(h) + commit, err := p.SignStatusClient.Commit(h) if err != nil { // TODO: standartise errors on the RPC side if strings.Contains(err.Error(), "height must be less than or equal") { @@ -81,16 +88,16 @@ func (p *http) SignedHeader(height int64) (*types.SignedHeader, error) { return &commit.SignedHeader, nil } -// ValidatorSet fetches a ValidatorSet at the given height. Multiple HTTP +// VoterSet fetches a VoterSet at the given height. Multiple HTTP // requests might be required if the validator set size is over 100. 
-func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *http) VoterSet(height int64) (*types.VoterSet, error) { h, err := validateHeight(height) if err != nil { return nil, err } const maxPerPage = 100 - res, err := p.client.Validators(h, 0, maxPerPage) + res, err := p.SignStatusClient.Voters(h, 0, maxPerPage) if err != nil { // TODO: standartise errors on the RPC side if strings.Contains(err.Error(), "height must be less than or equal") { @@ -100,23 +107,23 @@ func (p *http) ValidatorSet(height int64) (*types.ValidatorSet, error) { } var ( - vals = res.Validators + vals = res.Voters page = 1 ) // Check if there are more validators. - for len(res.Validators) == maxPerPage { - res, err = p.client.Validators(h, page, maxPerPage) + for len(res.Voters) == maxPerPage { + res, err = p.SignStatusClient.Voters(h, page, maxPerPage) if err != nil { return nil, err } - if len(res.Validators) > 0 { - vals = append(vals, res.Validators...) + if len(res.Voters) > 0 { + vals = append(vals, res.Voters...) 
} page++ } - return types.NewValidatorSet(vals), nil + return types.WrapValidatorsToVoterSet(vals), nil } func validateHeight(height int64) (*int64, error) { diff --git a/lite2/provider/http/http_test.go b/lite2/provider/http/http_test.go index 1e5f4cb2b..009fe1944 100644 --- a/lite2/provider/http/http_test.go +++ b/lite2/provider/http/http_test.go @@ -1,6 +1,7 @@ -package http +package http_test import ( + "fmt" "os" "testing" @@ -8,11 +9,26 @@ import ( "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/abci/example/kvstore" + "github.com/tendermint/tendermint/lite2/provider/http" rpcclient "github.com/tendermint/tendermint/rpc/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) +func TestNewProvider(t *testing.T) { + c, err := http.New("chain-test", "192.168.0.1:26657") + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s", c), "http{http://192.168.0.1:26657}") + + c, err = http.New("chain-test", "http://153.200.0.1:26657") + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1:26657}") + + c, err = http.New("chain-test", "153.200.0.1") + require.NoError(t, err) + require.Equal(t, fmt.Sprintf("%s", c), "http{http://153.200.0.1}") +} + func TestMain(m *testing.M) { app := kvstore.NewApplication() node := rpctest.StartTendermint(app) @@ -33,12 +49,12 @@ func TestProvider(t *testing.T) { } chainID := genDoc.ChainID t.Log("chainID:", chainID) - p, err := New(chainID, rpcAddr) + p, err := http.New(chainID, rpcAddr) require.Nil(t, err) require.NotNil(t, p) // let it produce some blocks - err = rpcclient.WaitForHeight(p.(*http).client, 6, nil) + err = rpcclient.WaitForHeight(p.(rpcclient.StatusClient), 6, nil) require.Nil(t, err) // let's get the highest block diff --git a/lite2/provider/mock/deadmock.go b/lite2/provider/mock/deadmock.go index 77c474411..55bfa44b9 100644 --- a/lite2/provider/mock/deadmock.go +++ 
b/lite2/provider/mock/deadmock.go @@ -28,6 +28,6 @@ func (p *deadMock) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, errors.New("no response from provider") } -func (p *deadMock) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *deadMock) VoterSet(height int64) (*types.VoterSet, error) { return nil, errors.New("no response from provider") } diff --git a/lite2/provider/mock/mock.go b/lite2/provider/mock/mock.go index 7ff7bc9a1..4ffc9c37f 100644 --- a/lite2/provider/mock/mock.go +++ b/lite2/provider/mock/mock.go @@ -11,12 +11,12 @@ import ( type mock struct { chainID string headers map[int64]*types.SignedHeader - vals map[int64]*types.ValidatorSet + vals map[int64]*types.VoterSet } // New creates a mock provider with the given set of headers and validator // sets. -func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.ValidatorSet) provider.Provider { +func New(chainID string, headers map[int64]*types.SignedHeader, vals map[int64]*types.VoterSet) provider.Provider { return &mock{ chainID: chainID, headers: headers, @@ -53,7 +53,7 @@ func (p *mock) SignedHeader(height int64) (*types.SignedHeader, error) { return nil, provider.ErrSignedHeaderNotFound } -func (p *mock) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (p *mock) VoterSet(height int64) (*types.VoterSet, error) { if height == 0 && len(p.vals) > 0 { return p.vals[int64(len(p.vals))], nil } diff --git a/lite2/provider/provider.go b/lite2/provider/provider.go index 773e17e32..ba4d0cdad 100644 --- a/lite2/provider/provider.go +++ b/lite2/provider/provider.go @@ -22,14 +22,14 @@ type Provider interface { // error is returned. SignedHeader(height int64) (*types.SignedHeader, error) - // ValidatorSet returns the ValidatorSet that corresponds to height. + // VoterSet returns the VoterSet that corresponds to height. // // 0 - the latest. // height must be >= 0. 
// - // If the provider fails to fetch the ValidatorSet due to the IO or other + // If the provider fails to fetch the VoterSet due to the IO or other // issues, an error will be returned. - // If there's no ValidatorSet for the given height, ErrValidatorSetNotFound + // If there's no VoterSet for the given height, ErrValidatorSetNotFound // error is returned. - ValidatorSet(height int64) (*types.ValidatorSet, error) + VoterSet(height int64) (*types.VoterSet, error) } diff --git a/lite2/proxy/routes.go b/lite2/proxy/routes.go index f7d5cd25b..45c9ad41c 100644 --- a/lite2/proxy/routes.go +++ b/lite2/proxy/routes.go @@ -132,10 +132,10 @@ func makeTxSearchFunc(c *lrpc.Client) rpcTxSearchFunc { } type rpcValidatorsFunc func(ctx *rpctypes.Context, height *int64, - page, perPage int) (*ctypes.ResultValidators, error) + page, perPage int) (*ctypes.ResultVoters, error) func makeValidatorsFunc(c *lrpc.Client) rpcValidatorsFunc { - return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultValidators, error) { + return func(ctx *rpctypes.Context, height *int64, page, perPage int) (*ctypes.ResultVoters, error) { return c.Validators(height, page, perPage) } } diff --git a/lite2/rpc/client.go b/lite2/rpc/client.go index abd15adc2..53c41a7a8 100644 --- a/lite2/rpc/client.go +++ b/lite2/rpc/client.go @@ -3,12 +3,11 @@ package rpc import ( "bytes" "context" + "errors" "fmt" "strings" "time" - "github.com/pkg/errors" - "github.com/tendermint/tendermint/crypto/merkle" tmbytes "github.com/tendermint/tendermint/libs/bytes" service "github.com/tendermint/tendermint/libs/service" @@ -19,6 +18,8 @@ import ( "github.com/tendermint/tendermint/types" ) +var errNegOrZeroHeight = errors.New("negative or zero height") + // Client is an RPC client, which uses lite#Client to verify data (if it can be // proved!). type Client struct { @@ -80,13 +81,13 @@ func (c *Client) ABCIQueryWithOptions(path string, data tmbytes.HexBytes, // Validate the response. 
if resp.IsErr() { - return nil, errors.Errorf("err response code: %v", resp.Code) + return nil, fmt.Errorf("err response code: %v", resp.Code) } if len(resp.Key) == 0 || resp.Proof == nil { return nil, errors.New("empty tree") } if resp.Height <= 0 { - return nil, errors.New("negative or zero height") + return nil, errNegOrZeroHeight } // Update the light client if we're behind. @@ -109,7 +110,7 @@ func (c *Client) ABCIQueryWithOptions(path string, data tmbytes.HexBytes, kp = kp.AppendKey(resp.Key, merkle.KeyEncodingURL) err = c.prt.VerifyValue(resp.Proof, h.AppHash, kp.String(), resp.Value) if err != nil { - return nil, errors.Wrap(err, "verify value proof") + return nil, fmt.Errorf("verify value proof: %w", err) } return &ctypes.ResultABCIQuery{Response: resp}, nil } @@ -118,7 +119,7 @@ func (c *Client) ABCIQueryWithOptions(path string, data tmbytes.HexBytes, // XXX How do we encode the key into a string... err = c.prt.VerifyAbsence(resp.Proof, h.AppHash, string(resp.Key)) if err != nil { - return nil, errors.Wrap(err, "verify absence proof") + return nil, fmt.Errorf("verify absence proof: %w", err) } return &ctypes.ResultABCIQuery{Response: resp}, nil } @@ -156,7 +157,32 @@ func (c *Client) ConsensusState() (*ctypes.ResultConsensusState, error) { } func (c *Client) ConsensusParams(height *int64) (*ctypes.ResultConsensusParams, error) { - return c.next.ConsensusParams(height) + res, err := c.next.ConsensusParams(height) + if err != nil { + return nil, err + } + + // Validate res. + if err := res.ConsensusParams.Validate(); err != nil { + return nil, err + } + if res.BlockHeight <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + h, err := c.updateLiteClientIfNeededTo(res.BlockHeight) + if err != nil { + return nil, err + } + + // Verify hash. 
+ if cH, tH := res.ConsensusParams.Hash(), h.ConsensusHash; !bytes.Equal(cH, tH) { + return nil, fmt.Errorf("params hash %X does not match trusted hash %X", + cH, tH) + } + + return res, nil } func (c *Client) Health() (*ctypes.ResultHealth, error) { @@ -172,12 +198,12 @@ func (c *Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock } // Validate res. - for _, meta := range res.BlockMetas { + for i, meta := range res.BlockMetas { if meta == nil { - return nil, errors.New("nil BlockMeta") + return nil, fmt.Errorf("nil block meta %d", i) } if err := meta.ValidateBasic(); err != nil { - return nil, errors.Wrap(err, "invalid BlockMeta") + return nil, fmt.Errorf("invalid block meta %d: %w", i, err) } } @@ -193,10 +219,10 @@ func (c *Client) BlockchainInfo(minHeight, maxHeight int64) (*ctypes.ResultBlock for _, meta := range res.BlockMetas { h, err := c.lc.TrustedHeader(meta.Header.Height) if err != nil { - return nil, errors.Wrapf(err, "TrustedHeader(%d)", meta.Header.Height) + return nil, fmt.Errorf("trusted header %d: %w", meta.Header.Height, err) } if bmH, tH := meta.Header.Hash(), h.Hash(); !bytes.Equal(bmH, tH) { - return nil, errors.Errorf("BlockMeta#Header %X does not match with trusted header %X", + return nil, fmt.Errorf("block meta header %X does not match with trusted header %X", bmH, tH) } } @@ -223,7 +249,7 @@ func (c *Client) Block(height *int64) (*ctypes.ResultBlock, error) { return nil, err } if bmH, bH := res.BlockID.Hash, res.Block.Hash(); !bytes.Equal(bmH, bH) { - return nil, errors.Errorf("BlockID %X does not match with Block %X", + return nil, fmt.Errorf("blockID %X does not match with block %X", bmH, bH) } @@ -235,7 +261,7 @@ func (c *Client) Block(height *int64) (*ctypes.ResultBlock, error) { // Verify block. 
if bH, tH := res.Block.Hash(), h.Hash(); !bytes.Equal(bH, tH) { - return nil, errors.Errorf("Block#Header %X does not match with trusted header %X", + return nil, fmt.Errorf("block header %X does not match with trusted header %X", bH, tH) } @@ -243,7 +269,30 @@ func (c *Client) Block(height *int64) (*ctypes.ResultBlock, error) { } func (c *Client) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) { - return c.next.BlockResults(height) + res, err := c.next.BlockResults(height) + if err != nil { + return nil, err + } + + // Validate res. + if res.Height <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + h, err := c.updateLiteClientIfNeededTo(res.Height + 1) + if err != nil { + return nil, err + } + + // Verify block results. + results := types.NewResults(res.TxsResults) + if rH, tH := results.Hash(), h.LastResultsHash; !bytes.Equal(rH, tH) { + return nil, fmt.Errorf("last results %X does not match with trusted last results %X", + rH, tH) + } + + return res, nil } func (c *Client) Commit(height *int64) (*ctypes.ResultCommit, error) { @@ -256,6 +305,9 @@ func (c *Client) Commit(height *int64) (*ctypes.ResultCommit, error) { if err := res.SignedHeader.ValidateBasic(c.lc.ChainID()); err != nil { return nil, err } + if res.Height <= 0 { + return nil, errNegOrZeroHeight + } // Update the light client if we're behind. h, err := c.updateLiteClientIfNeededTo(res.Height) @@ -265,7 +317,7 @@ func (c *Client) Commit(height *int64) (*ctypes.ResultCommit, error) { // Verify commit. if rH, tH := res.Hash(), h.Hash(); !bytes.Equal(rH, tH) { - return nil, errors.Errorf("header %X does not match with trusted header %X", + return nil, fmt.Errorf("header %X does not match with trusted header %X", rH, tH) } @@ -282,7 +334,7 @@ func (c *Client) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { // Validate res. 
if res.Height <= 0 { - return nil, errors.Errorf("invalid ResultTx: %v", res) + return nil, errNegOrZeroHeight } // Update the light client if we're behind. @@ -300,8 +352,40 @@ func (c *Client) TxSearch(query string, prove bool, page, perPage int, orderBy s return c.next.TxSearch(query, prove, page, perPage, orderBy) } -func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return c.next.Validators(height, page, perPage) +// Validators fetches and verifies validators. +// +// WARNING: only full validator sets are verified (when length of validators is +// less than +perPage+. +perPage+ default is 30, max is 100). +func (c *Client) Validators(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + res, err := c.next.Voters(height, page, perPage) + if err != nil { + return nil, err + } + + // Validate res. + if res.BlockHeight <= 0 { + return nil, errNegOrZeroHeight + } + + // Update the light client if we're behind. + h, err := c.updateLiteClientIfNeededTo(res.BlockHeight) + if err != nil { + return nil, err + } + + // Verify validators. 
+ if res.Count <= res.Total { + if rH, tH := types.WrapValidatorsToVoterSet(res.Voters).Hash(), h.VotersHash; !bytes.Equal(rH, tH) { + return nil, fmt.Errorf("validators %X does not match with trusted validators %X", + rH, tH) + } + } + + return res, nil +} + +func (c *Client) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return c.next.Voters(height, page, perPage) } func (c *Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { @@ -323,7 +407,10 @@ func (c *Client) UnsubscribeAll(ctx context.Context, subscriber string) error { func (c *Client) updateLiteClientIfNeededTo(height int64) (*types.SignedHeader, error) { h, err := c.lc.VerifyHeaderAtHeight(height, time.Now()) - return h, errors.Wrapf(err, "failed to update light client to %d", height) + if err != nil { + return nil, fmt.Errorf("failed to update light client to %d: %w", height, err) + } + return h, nil } func (c *Client) RegisterOpDecoder(typ string, dec merkle.OpDecoder) { @@ -382,7 +469,7 @@ func (c *Client) UnsubscribeAllWS(ctx *rpctypes.Context) (*ctypes.ResultUnsubscr func parseQueryStorePath(path string) (storeName string, err error) { if !strings.HasPrefix(path, "/") { - return "", fmt.Errorf("expected path to start with /") + return "", errors.New("expected path to start with /") } paths := strings.SplitN(path[1:], "/", 3) diff --git a/lite2/store/db/db.go b/lite2/store/db/db.go index d405b9865..dc4c4f5a4 100644 --- a/lite2/store/db/db.go +++ b/lite2/store/db/db.go @@ -47,11 +47,11 @@ func New(db dbm.DB, prefix string) store.Store { return &dbs{db: db, prefix: prefix, cdc: cdc, size: size} } -// SaveSignedHeaderAndValidatorSet persists SignedHeader and ValidatorSet to +// SaveSignedHeaderAndValidatorSet persists SignedHeader and VoterSet to // the db. // // Safe for concurrent use by multiple goroutines. 
-func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error { +func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.VoterSet) error { if sh.Height <= 0 { panic("negative or zero height") } @@ -84,7 +84,7 @@ func (s *dbs) SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *ty return err } -// DeleteSignedHeaderAndValidatorSet deletes SignedHeader and ValidatorSet from +// DeleteSignedHeaderAndValidatorSet deletes SignedHeader and VoterSet from // the db. // // Safe for concurrent use by multiple goroutines. @@ -132,10 +132,10 @@ func (s *dbs) SignedHeader(height int64) (*types.SignedHeader, error) { return signedHeader, err } -// ValidatorSet loads ValidatorSet at the given height. +// VoterSet loads VoterSet at the given height. // // Safe for concurrent use by multiple goroutines. -func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) { +func (s *dbs) VoterSet(height int64) (*types.VoterSet, error) { if height <= 0 { panic("negative or zero height") } @@ -148,7 +148,7 @@ func (s *dbs) ValidatorSet(height int64) (*types.ValidatorSet, error) { return nil, store.ErrValidatorSetNotFound } - var valSet *types.ValidatorSet + var valSet *types.VoterSet err = s.cdc.UnmarshalBinaryLengthPrefixed(bz, &valSet) return valSet, err } @@ -203,18 +203,18 @@ func (s *dbs) FirstSignedHeaderHeight() (int64, error) { return -1, nil } -// SignedHeaderAfter iterates over headers until it finds a header after one at -// height. It returns ErrSignedHeaderNotFound if no such header exists. +// SignedHeaderBefore iterates over headers until it finds a header before +// the given height. It returns ErrSignedHeaderNotFound if no such header exists. // // Safe for concurrent use by multiple goroutines. 
-func (s *dbs) SignedHeaderAfter(height int64) (*types.SignedHeader, error) { +func (s *dbs) SignedHeaderBefore(height int64) (*types.SignedHeader, error) { if height <= 0 { panic("negative or zero height") } - itr, err := s.db.Iterator( - s.shKey(height+1), - append(s.shKey(1<<63-1), byte(0x00)), + itr, err := s.db.ReverseIterator( + s.shKey(1), + s.shKey(height), ) if err != nil { panic(err) diff --git a/lite2/store/db/db_test.go b/lite2/store/db/db_test.go index 2b82de8f3..93d5a8f79 100644 --- a/lite2/store/db/db_test.go +++ b/lite2/store/db/db_test.go @@ -26,7 +26,7 @@ func TestLast_FirstSignedHeaderHeight(t *testing.T) { // 1 key err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.VoterSet{}) require.NoError(t, err) height, err = dbStore.LastSignedHeaderHeight() @@ -46,20 +46,20 @@ func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { require.Error(t, err) assert.Nil(t, h) - valSet, err := dbStore.ValidatorSet(1) + valSet, err := dbStore.VoterSet(1) require.Error(t, err) assert.Nil(t, valSet) // 1 key err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 1}}, &types.VoterSet{}) require.NoError(t, err) h, err = dbStore.SignedHeader(1) require.NoError(t, err) assert.NotNil(t, h) - valSet, err = dbStore.ValidatorSet(1) + valSet, err = dbStore.VoterSet(1) require.NoError(t, err) assert.NotNil(t, valSet) @@ -71,24 +71,24 @@ func Test_SaveSignedHeaderAndValidatorSet(t *testing.T) { require.Error(t, err) assert.Nil(t, h) - valSet, err = dbStore.ValidatorSet(1) + valSet, err = dbStore.VoterSet(1) require.Error(t, err) assert.Nil(t, valSet) } -func Test_SignedHeaderAfter(t *testing.T) { - dbStore := New(dbm.NewMemDB(), "Test_SignedHeaderAfter") +func Test_SignedHeaderBefore(t *testing.T) { + dbStore := 
New(dbm.NewMemDB(), "Test_SignedHeaderBefore") assert.Panics(t, func() { - dbStore.SignedHeaderAfter(0) - dbStore.SignedHeaderAfter(100) + _, _ = dbStore.SignedHeaderBefore(0) + _, _ = dbStore.SignedHeaderBefore(100) }) err := dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.VoterSet{}) require.NoError(t, err) - h, err := dbStore.SignedHeaderAfter(1) + h, err := dbStore.SignedHeaderBefore(3) require.NoError(t, err) if assert.NotNil(t, h) { assert.EqualValues(t, 2, h.Height) @@ -105,7 +105,7 @@ func Test_Prune(t *testing.T) { // One header err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: 2}}, &types.VoterSet{}) require.NoError(t, err) assert.EqualValues(t, 1, dbStore.Size()) @@ -121,7 +121,7 @@ func Test_Prune(t *testing.T) { // Multiple headers for i := 1; i <= 10; i++ { err = dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: int64(i)}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: int64(i)}}, &types.VoterSet{}) require.NoError(t, err) } @@ -144,10 +144,10 @@ func Test_Concurrency(t *testing.T) { defer wg.Done() dbStore.SaveSignedHeaderAndValidatorSet( - &types.SignedHeader{Header: &types.Header{Height: i}}, &types.ValidatorSet{}) + &types.SignedHeader{Header: &types.Header{Height: i}}, &types.VoterSet{}) dbStore.SignedHeader(i) - dbStore.ValidatorSet(i) + dbStore.VoterSet(i) dbStore.LastSignedHeaderHeight() dbStore.FirstSignedHeaderHeight() diff --git a/lite2/store/store.go b/lite2/store/store.go index 7ea6b9c6b..f1f436d91 100644 --- a/lite2/store/store.go +++ b/lite2/store/store.go @@ -5,13 +5,13 @@ import "github.com/tendermint/tendermint/types" // Store is anything that can persistenly store headers. 
type Store interface { // SaveSignedHeaderAndValidatorSet saves a SignedHeader (h: sh.Height) and a - // ValidatorSet (h: sh.Height). + // VoterSet (h: sh.Height). // // height must be > 0. - SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.ValidatorSet) error + SaveSignedHeaderAndValidatorSet(sh *types.SignedHeader, valSet *types.VoterSet) error // DeleteSignedHeaderAndValidatorSet deletes SignedHeader (h: height) and - // ValidatorSet (h: height). + // VoterSet (h: height). // // height must be > 0. DeleteSignedHeaderAndValidatorSet(height int64) error @@ -24,12 +24,12 @@ type Store interface { // If SignedHeader is not found, ErrSignedHeaderNotFound is returned. SignedHeader(height int64) (*types.SignedHeader, error) - // ValidatorSet returns the ValidatorSet that corresponds to height. + // VoterSet returns the VoterSet that corresponds to height. // // height must be > 0. // - // If ValidatorSet is not found, ErrValidatorSetNotFound is returned. - ValidatorSet(height int64) (*types.ValidatorSet, error) + // If VoterSet is not found, ErrValidatorSetNotFound is returned. + VoterSet(height int64) (*types.VoterSet, error) // LastSignedHeaderHeight returns the last (newest) SignedHeader height. // @@ -41,10 +41,10 @@ type Store interface { // If the store is empty, -1 and nil error are returned. FirstSignedHeaderHeight() (int64, error) - // SignedHeaderAfter returns the SignedHeader after the certain height. + // SignedHeaderBefore returns the SignedHeader before a certain height. // // height must be > 0 && <= LastSignedHeaderHeight. - SignedHeaderAfter(height int64) (*types.SignedHeader, error) + SignedHeaderBefore(height int64) (*types.SignedHeader, error) // Prune removes headers & the associated validator sets when Store reaches a // defined size (number of header & validator set pairs). 
diff --git a/lite2/verifier.go b/lite2/verifier.go index 6d8459ab6..6abd51c12 100644 --- a/lite2/verifier.go +++ b/lite2/verifier.go @@ -10,10 +10,6 @@ import ( "github.com/tendermint/tendermint/types" ) -const ( - maxClockDrift = 10 * time.Second -) - var ( // DefaultTrustLevel - new header can be trusted if at least one correct // validator signed it. @@ -30,14 +26,18 @@ var ( // d) more than 2/3 of untrustedVals have signed h2 // (otherwise, ErrInvalidHeader is returned) // e) headers are non-adjacent. +// +// maxClockDrift defines how much untrustedHeader.Time can drift into the +// future. func VerifyNonAdjacent( chainID string, - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *types.SignedHeader, + trustedVals *types.VoterSet, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { if untrustedHeader.Height == trustedHeader.Height+1 { @@ -48,7 +48,11 @@ func VerifyNonAdjacent( return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now} } - if err := verifyNewHeaderAndVals(chainID, untrustedHeader, untrustedVals, trustedHeader, now); err != nil { + if err := verifyNewHeaderAndVals( + chainID, + untrustedHeader, untrustedVals, + trustedHeader, + now, maxClockDrift); err != nil { return ErrInvalidHeader{err} } @@ -86,13 +90,17 @@ func VerifyNonAdjacent( // d) more than 2/3 of new validators (untrustedVals) have signed h2 // (otherwise, ErrInvalidHeader is returned) // e) headers are adjacent. +// +// maxClockDrift defines how much untrustedHeader.Time can drift into the +// future. 
func VerifyAdjacent( chainID string, - trustedHeader *types.SignedHeader, // height=X - untrustedHeader *types.SignedHeader, // height=X+1 - untrustedVals *types.ValidatorSet, // height=X+1 + trustedHeader *types.SignedHeader, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, - now time.Time) error { + now time.Time, + maxClockDrift time.Duration) error { if untrustedHeader.Height != trustedHeader.Height+1 { return errors.New("headers must be adjacent in height") @@ -102,15 +110,19 @@ func VerifyAdjacent( return ErrOldHeaderExpired{trustedHeader.Time.Add(trustingPeriod), now} } - if err := verifyNewHeaderAndVals(chainID, untrustedHeader, untrustedVals, trustedHeader, now); err != nil { + if err := verifyNewHeaderAndVals( + chainID, + untrustedHeader, untrustedVals, + trustedHeader, + now, maxClockDrift); err != nil { return ErrInvalidHeader{err} } // Check the validator hashes are the same - if !bytes.Equal(untrustedHeader.ValidatorsHash, trustedHeader.NextValidatorsHash) { + if !bytes.Equal(untrustedHeader.VotersHash, trustedHeader.NextVotersHash) { err := errors.Errorf("expected old header next validators (%X) to match those from new header (%X)", - trustedHeader.NextValidatorsHash, - untrustedHeader.ValidatorsHash, + trustedHeader.NextVotersHash, + untrustedHeader.VotersHash, ) return err } @@ -127,28 +139,30 @@ func VerifyAdjacent( // Verify combines both VerifyAdjacent and VerifyNonAdjacent functions. 
func Verify( chainID string, - trustedHeader *types.SignedHeader, // height=X - trustedVals *types.ValidatorSet, // height=X or height=X+1 - untrustedHeader *types.SignedHeader, // height=Y - untrustedVals *types.ValidatorSet, // height=Y + trustedHeader *types.SignedHeader, + trustedVals *types.VoterSet, + untrustedHeader *types.SignedHeader, + untrustedVals *types.VoterSet, trustingPeriod time.Duration, now time.Time, + maxClockDrift time.Duration, trustLevel tmmath.Fraction) error { if untrustedHeader.Height != trustedHeader.Height+1 { return VerifyNonAdjacent(chainID, trustedHeader, trustedVals, untrustedHeader, untrustedVals, - trustingPeriod, now, trustLevel) + trustingPeriod, now, maxClockDrift, trustLevel) } - return VerifyAdjacent(chainID, trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now) + return VerifyAdjacent(chainID, trustedHeader, untrustedHeader, untrustedVals, trustingPeriod, now, maxClockDrift) } func verifyNewHeaderAndVals( chainID string, untrustedHeader *types.SignedHeader, - untrustedVals *types.ValidatorSet, + untrustedVals *types.VoterSet, trustedHeader *types.SignedHeader, - now time.Time) error { + now time.Time, + maxClockDrift time.Duration) error { if err := untrustedHeader.ValidateBasic(chainID); err != nil { return errors.Wrap(err, "untrustedHeader.ValidateBasic failed") @@ -173,10 +187,11 @@ func verifyNewHeaderAndVals( maxClockDrift) } - if !bytes.Equal(untrustedHeader.ValidatorsHash, untrustedVals.Hash()) { - return errors.Errorf("expected new header validators (%X) to match those that were supplied (%X)", - untrustedHeader.ValidatorsHash, + if !bytes.Equal(untrustedHeader.VotersHash, untrustedVals.Hash()) { + return errors.Errorf("expected new header voters (%X) to match those that were supplied (%X) at height %d", + untrustedHeader.VotersHash, untrustedVals.Hash(), + untrustedHeader.Height, ) } diff --git a/lite2/verifier_test.go b/lite2/verifier_test.go index adc671516..398574f43 100644 --- 
a/lite2/verifier_test.go +++ b/lite2/verifier_test.go @@ -1,4 +1,4 @@ -package lite +package lite_test import ( "fmt" @@ -8,9 +8,14 @@ import ( "github.com/stretchr/testify/assert" tmmath "github.com/tendermint/tendermint/libs/math" + lite "github.com/tendermint/tendermint/lite2" "github.com/tendermint/tendermint/types" ) +const ( + maxClockDrift = 10 * time.Second +) + func TestVerifyAdjacentHeaders(t *testing.T) { const ( chainID = "TestVerifyAdjacentHeaders" @@ -21,7 +26,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10).Validators) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) @@ -29,7 +34,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { testCases := []struct { newHeader *types.SignedHeader - newVals *types.ValidatorSet + newVals *types.VoterSet trustingPeriod time.Duration now time.Time expErr error @@ -113,14 +118,15 @@ func TestVerifyAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + lite.ErrInvalidHeader{Reason: types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // vals does not match with what we have -> error 8: { - keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, keys.ToValidators(10, 1), vals, - []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, + types.ToVoterAll(keys.ToValidators(10, 1).Validators), vals, []byte("app_hash"), []byte("cons_hash"), + []byte("results_hash"), 0, len(keys)), + 
types.ToVoterAll(keys.ToValidators(10, 1).Validators), 3 * time.Hour, bTime.Add(2 * time.Hour), nil, @@ -130,7 +136,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { 9: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + types.ToVoterAll(keys.ToValidators(10, 1).Validators), 3 * time.Hour, bTime.Add(2 * time.Hour), nil, @@ -140,7 +146,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { 10: { keys.GenSignedHeader(chainID, nextHeight, bTime.Add(1*time.Hour), nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)), - keys.ToValidators(10, 1), + types.ToVoterAll(keys.ToValidators(10, 1).Validators), 1 * time.Hour, bTime.Add(1 * time.Hour), nil, @@ -151,7 +157,7 @@ func TestVerifyAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - err := VerifyAdjacent(chainID, header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now) + err := lite.VerifyAdjacent(chainID, header, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, maxClockDrift) switch { case tc.expErr != nil && assert.Error(t, err): assert.Equal(t, tc.expErr, err) @@ -174,27 +180,27 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! 
- vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10).Validators) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) // 30, 40, 50 twoThirds = keys[1:] - twoThirdsVals = twoThirds.ToValidators(30, 10) + twoThirdsVals = types.ToVoterAll(twoThirds.ToValidators(30, 10).Validators) // 50 oneThird = keys[len(keys)-1:] - oneThirdVals = oneThird.ToValidators(50, 10) + oneThirdVals = types.ToVoterAll(oneThird.ToValidators(50, 10).Validators) // 20 lessThanOneThird = keys[0:1] - lessThanOneThirdVals = lessThanOneThird.ToValidators(20, 10) + lessThanOneThirdVals = types.ToVoterAll(lessThanOneThird.ToValidators(20, 10).Validators) ) testCases := []struct { newHeader *types.SignedHeader - newVals *types.ValidatorSet + newVals *types.VoterSet trustingPeriod time.Duration now time.Time expErr error @@ -227,7 +233,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { vals, 3 * time.Hour, bTime.Add(2 * time.Hour), - ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, + lite.ErrInvalidHeader{types.ErrNotEnoughVotingPowerSigned{Got: 50, Needed: 93}}, "", }, // 3/3 new vals signed, 2/3 old vals present -> no error @@ -257,7 +263,7 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { lessThanOneThirdVals, 3 * time.Hour, bTime.Add(2 * time.Hour), - ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, + lite.ErrNewValSetCantBeTrusted{types.ErrNotEnoughVotingPowerSigned{Got: 20, Needed: 46}}, "", }, } @@ -265,8 +271,9 @@ func TestVerifyNonAdjacentHeaders(t *testing.T) { for i, tc := range testCases { tc := tc t.Run(fmt.Sprintf("#%d", i), func(t *testing.T) { - err := VerifyNonAdjacent(chainID, header, vals, tc.newHeader, tc.newVals, tc.trustingPeriod, tc.now, - DefaultTrustLevel) + err := lite.VerifyNonAdjacent(chainID, header, vals, 
tc.newHeader, tc.newVals, tc.trustingPeriod, + tc.now, maxClockDrift, + lite.DefaultTrustLevel) switch { case tc.expErr != nil && assert.Error(t, err): @@ -289,13 +296,13 @@ func TestVerifyReturnsErrorIfTrustLevelIsInvalid(t *testing.T) { var ( keys = genPrivKeys(4) // 20, 30, 40, 50 - the first 3 don't have 2/3, the last 3 do! - vals = keys.ToValidators(20, 10) + vals = types.ToVoterAll(keys.ToValidators(20, 10).Validators) bTime, _ = time.Parse(time.RFC3339, "2006-01-02T15:04:05Z") header = keys.GenSignedHeader(chainID, lastHeight, bTime, nil, vals, vals, []byte("app_hash"), []byte("cons_hash"), []byte("results_hash"), 0, len(keys)) ) - err := Verify(chainID, header, vals, header, vals, 2*time.Hour, time.Now(), + err := lite.Verify(chainID, header, vals, header, vals, 2*time.Hour, time.Now(), maxClockDrift, tmmath.Fraction{Numerator: 2, Denominator: 1}) assert.Error(t, err) } @@ -322,7 +329,7 @@ func TestValidateTrustLevel(t *testing.T) { } for _, tc := range testCases { - err := ValidateTrustLevel(tc.lvl) + err := lite.ValidateTrustLevel(tc.lvl) if !tc.valid { assert.Error(t, err) } else { diff --git a/node/codec.go b/node/codec.go index 7607b0dd0..e172b9696 100644 --- a/node/codec.go +++ b/node/codec.go @@ -2,6 +2,7 @@ package node import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/node/node.go b/node/node.go index 7242995ca..5fb0664ea 100644 --- a/node/node.go +++ b/node/node.go @@ -17,9 +17,12 @@ import ( "github.com/rs/cors" amino "github.com/tendermint/go-amino" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" bcv0 "github.com/tendermint/tendermint/blockchain/v0" bcv1 "github.com/tendermint/tendermint/blockchain/v1" + bcv2 "github.com/tendermint/tendermint/blockchain/v2" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" cs 
"github.com/tendermint/tendermint/consensus" @@ -45,7 +48,6 @@ import ( "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) //------------------------------------------------------------------------------ @@ -309,12 +311,12 @@ func logNodeStartupInfo(state sm.State, pubKey crypto.PubKey, logger, consensusL } } -func onlyValidatorIsUs(state sm.State, privVal types.PrivValidator) bool { +func onlyValidatorIsUs(state sm.State, pubKey crypto.PubKey) bool { if state.Validators.Size() > 1 { return false } addr, _ := state.Validators.GetByIndex(0) - return bytes.Equal(privVal.GetPubKey().Address(), addr) + return bytes.Equal(pubKey.Address(), addr) } func createMempoolAndMempoolReactor(config *cfg.Config, proxyApp proxy.AppConns, @@ -365,6 +367,8 @@ func createBlockchainReactor(config *cfg.Config, bcReactor = bcv0.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) case "v1": bcReactor = bcv1.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) + case "v2": + bcReactor = bcv2.NewBlockchainReactor(state.Copy(), blockExec, blockStore, fastSync) default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } @@ -618,17 +622,16 @@ func NewNode(config *cfg.Config, } } - pubKey := privValidator.GetPubKey() - if pubKey == nil { - // TODO: GetPubKey should return errors - https://github.com/tendermint/tendermint/issues/3602 - return nil, errors.New("could not retrieve public key from private validator") + pubKey, err := privValidator.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") } logNodeStartupInfo(state, pubKey, logger, consensusLogger) // Decide whether to fast-sync or not // We don't fast-sync when the only validator is us. 
- fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, privValidator) + fastSync := config.FastSyncMode && !onlyValidatorIsUs(state, pubKey) csMetrics, p2pMetrics, memplMetrics, smMetrics := metricsProvider(genDoc.ChainID) @@ -861,7 +864,10 @@ func (n *Node) ConfigureRPC() { rpccore.SetEvidencePool(n.evidencePool) rpccore.SetP2PPeers(n.sw) rpccore.SetP2PTransport(n) - pubKey := n.privValidator.GetPubKey() + pubKey, err := n.privValidator.GetPubKey() + if err != nil { + panic(err) + } rpccore.SetPubKey(pubKey) rpccore.SetGenesisDoc(n.genesisDoc) rpccore.SetProxyAppQuery(n.proxyApp.Query()) @@ -1096,6 +1102,8 @@ func makeNodeInfo( bcChannel = bcv0.BlockchainChannel case "v1": bcChannel = bcv1.BlockchainChannel + case "v2": + bcChannel = bcv2.BlockchainChannel default: return nil, fmt.Errorf("unknown fastsync version %s", config.FastSync.Version) } diff --git a/node/node_test.go b/node/node_test.go index 4a65849a1..113418614 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -12,6 +12,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/abci/example/kvstore" cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" @@ -27,7 +29,6 @@ import ( "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" "github.com/tendermint/tendermint/version" - dbm "github.com/tendermint/tm-db" ) func TestNodeStartStop(t *testing.T) { @@ -355,7 +356,7 @@ func state(nVals int, height int64) (sm.State, dbm.DB, types.PrivValidator) { for i := 1; i < int(height); i++ { s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() + s.LastVoters = s.Voters.Copy() sm.SaveState(stateDB, s) } return s, stateDB, privVal diff --git a/p2p/codec.go b/p2p/codec.go index 6368b7d68..463276318 100644 --- a/p2p/codec.go +++ b/p2p/codec.go @@ -2,6 +2,7 @@ package p2p import ( amino 
"github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/p2p/conn/codec.go b/p2p/conn/codec.go index 149a09638..0625c7a38 100644 --- a/p2p/conn/codec.go +++ b/p2p/conn/codec.go @@ -2,6 +2,7 @@ package conn import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/p2p/conn/connection.go b/p2p/conn/connection.go index 1c2088636..0436e115c 100644 --- a/p2p/conn/connection.go +++ b/p2p/conn/connection.go @@ -16,6 +16,7 @@ import ( "github.com/pkg/errors" amino "github.com/tendermint/go-amino" + flow "github.com/tendermint/tendermint/libs/flowrate" "github.com/tendermint/tendermint/libs/log" tmmath "github.com/tendermint/tendermint/libs/math" diff --git a/p2p/conn/connection_test.go b/p2p/conn/connection_test.go index 7daa6076d..29d29fc6e 100644 --- a/p2p/conn/connection_test.go +++ b/p2p/conn/connection_test.go @@ -11,6 +11,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/p2p/conn/secret_connection_test.go b/p2p/conn/secret_connection_test.go index 5ac3b8509..9044d73be 100644 --- a/p2p/conn/secret_connection_test.go +++ b/p2p/conn/secret_connection_test.go @@ -16,6 +16,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/secp256k1" diff --git a/p2p/key_test.go b/p2p/key_test.go index e0579dde6..6f8e9b0f8 100644 --- a/p2p/key_test.go +++ b/p2p/key_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + tmrand "github.com/tendermint/tendermint/libs/rand" ) diff --git a/p2p/mock/reactor.go b/p2p/mock/reactor.go index cfce12bd1..40f918e9f 100644 --- a/p2p/mock/reactor.go +++ 
b/p2p/mock/reactor.go @@ -12,7 +12,7 @@ type Reactor struct { func NewReactor() *Reactor { r := &Reactor{} - r.BaseReactor = *p2p.NewBaseReactor("Reactor", r) + r.BaseReactor = *p2p.NewBaseReactor("Mock-PEX", r) r.SetLogger(log.TestingLogger()) return r } diff --git a/p2p/node_info_test.go b/p2p/node_info_test.go index 6937affb8..8896efe1d 100644 --- a/p2p/node_info_test.go +++ b/p2p/node_info_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/ed25519" ) diff --git a/p2p/pex/addrbook.go b/p2p/pex/addrbook.go index dbba71345..c9c372638 100644 --- a/p2p/pex/addrbook.go +++ b/p2p/pex/addrbook.go @@ -59,9 +59,12 @@ type AddrBook interface { // Mark address MarkGood(p2p.ID) MarkAttempt(*p2p.NetAddress) - MarkBad(*p2p.NetAddress) + MarkBad(*p2p.NetAddress, time.Duration) // Move peer to bad peers list + // Add bad peers back to addrBook + ReinstateBadPeers() IsGood(*p2p.NetAddress) bool + IsBanned(*p2p.NetAddress) bool // Send a selection of addresses to peers GetSelection() []*p2p.NetAddress @@ -87,6 +90,7 @@ type addrBook struct { ourAddrs map[string]struct{} privateIDs map[p2p.ID]struct{} addrLookup map[p2p.ID]*knownAddress // new & old + badPeers map[p2p.ID]*knownAddress // blacklisted peers bucketsOld []map[string]*knownAddress bucketsNew []map[string]*knownAddress nOld int @@ -108,6 +112,7 @@ func NewAddrBook(filePath string, routabilityStrict bool) AddrBook { ourAddrs: make(map[string]struct{}), privateIDs: make(map[p2p.ID]struct{}), addrLookup: make(map[p2p.ID]*knownAddress), + badPeers: make(map[p2p.ID]*knownAddress), filePath: filePath, routabilityStrict: routabilityStrict, } @@ -205,12 +210,7 @@ func (a *addrBook) RemoveAddress(addr *p2p.NetAddress) { a.mtx.Lock() defer a.mtx.Unlock() - ka := a.addrLookup[addr.ID] - if ka == nil { - return - } - a.Logger.Info("Remove address from book", "addr", addr) - a.removeFromAllBuckets(ka) + a.removeAddress(addr) } // IsGood returns true if 
peer was ever marked as good and haven't @@ -222,6 +222,15 @@ func (a *addrBook) IsGood(addr *p2p.NetAddress) bool { return a.addrLookup[addr.ID].isOld() } +// IsBanned returns true if the peer is currently banned +func (a *addrBook) IsBanned(addr *p2p.NetAddress) bool { + a.mtx.Lock() + _, ok := a.badPeers[addr.ID] + a.mtx.Unlock() + + return ok +} + // HasAddress returns true if the address is in the book. func (a *addrBook) HasAddress(addr *p2p.NetAddress) bool { a.mtx.Lock() @@ -324,10 +333,28 @@ func (a *addrBook) MarkAttempt(addr *p2p.NetAddress) { ka.markAttempt() } -// MarkBad implements AddrBook. Currently it just ejects the address. -// TODO: black list for some amount of time -func (a *addrBook) MarkBad(addr *p2p.NetAddress) { - a.RemoveAddress(addr) +// MarkBad implements AddrBook. Kicks address out from book, places +// the address in the badPeers pool. +func (a *addrBook) MarkBad(addr *p2p.NetAddress, banTime time.Duration) { + a.mtx.Lock() + defer a.mtx.Unlock() + + if a.addBadPeer(addr, banTime) { + a.removeAddress(addr) + } +} + +func (a *addrBook) ReinstateBadPeers() { + a.mtx.Lock() + defer a.mtx.Unlock() + for _, ka := range a.badPeers { + if !ka.isBanned() { + bucket := a.calcNewBucket(ka.Addr, ka.Src) + a.addToNewBucket(ka, bucket) + delete(a.badPeers, ka.ID()) + a.Logger.Info("Reinstated address", "addr", ka.Addr) + } + } } // GetSelection implements AddrBook. 
@@ -592,6 +619,10 @@ func (a *addrBook) addAddress(addr, src *p2p.NetAddress) error { return ErrAddrBookInvalidAddr{Addr: addr, AddrErr: err} } + if _, ok := a.badPeers[addr.ID]; ok { + return ErrAddressBanned{addr} + } + if _, ok := a.privateIDs[addr.ID]; ok { return ErrAddrBookPrivate{addr} } @@ -725,6 +756,32 @@ func (a *addrBook) moveToOld(ka *knownAddress) { } } +func (a *addrBook) removeAddress(addr *p2p.NetAddress) { + ka := a.addrLookup[addr.ID] + if ka == nil { + return + } + a.Logger.Info("Remove address from book", "addr", addr) + a.removeFromAllBuckets(ka) +} + +func (a *addrBook) addBadPeer(addr *p2p.NetAddress, banTime time.Duration) bool { + // check it exists in addrbook + ka := a.addrLookup[addr.ID] + // check address is not already there + if ka == nil { + return false + } + + if _, alreadyBadPeer := a.badPeers[addr.ID]; !alreadyBadPeer { + // add to bad peer list + ka.ban(banTime) + a.badPeers[addr.ID] = ka + a.Logger.Info("Add address to blacklist", "addr", addr) + } + return true +} + //--------------------------------------------------------------------- // calculate bucket placements diff --git a/p2p/pex/addrbook_test.go b/p2p/pex/addrbook_test.go index 363958c44..739fff185 100644 --- a/p2p/pex/addrbook_test.go +++ b/p2p/pex/addrbook_test.go @@ -7,6 +7,7 @@ import ( "math" "os" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -343,7 +344,7 @@ func TestAddrBookGetSelectionWithBias(t *testing.T) { } } - got, expected := int((float64(good)/float64(len(selection)))*100), (100 - biasTowardsNewAddrs) + got, expected := int((float64(good)/float64(len(selection)))*100), 100-biasTowardsNewAddrs // compute some slack to protect against small differences due to rounding: slack := int(math.Round(float64(100) / float64(len(selection)))) @@ -396,6 +397,33 @@ func testCreatePrivateAddrs(t *testing.T, numAddrs int) ([]*p2p.NetAddress, []st return addrs, private } +func TestBanBadPeers(t *testing.T) { + 
fname := createTempFileName("addrbook_test") + defer deleteTempFile(fname) + + book := NewAddrBook(fname, true) + book.SetLogger(log.TestingLogger()) + + addr := randIPv4Address(t) + _ = book.AddAddress(addr, addr) + + book.MarkBad(addr, 1*time.Second) + // addr should not reachable + assert.False(t, book.HasAddress(addr)) + assert.True(t, book.IsBanned(addr)) + + err := book.AddAddress(addr, addr) + // book should not add address from the blacklist + assert.Error(t, err) + + time.Sleep(1 * time.Second) + book.ReinstateBadPeers() + // address should be reinstated in the new bucket + assert.EqualValues(t, 1, book.Size()) + assert.True(t, book.HasAddress(addr)) + assert.False(t, book.IsGood(addr)) +} + func TestAddrBookEmpty(t *testing.T) { fname := createTempFileName("addrbook_test") defer deleteTempFile(fname) diff --git a/p2p/pex/errors.go b/p2p/pex/errors.go index 911389a9e..8f51d4217 100644 --- a/p2p/pex/errors.go +++ b/p2p/pex/errors.go @@ -1,6 +1,7 @@ package pex import ( + "errors" "fmt" "github.com/tendermint/tendermint/p2p" @@ -63,3 +64,15 @@ type ErrAddrBookInvalidAddr struct { func (err ErrAddrBookInvalidAddr) Error() string { return fmt.Sprintf("Cannot add invalid address %v: %v", err.Addr, err.AddrErr) } + +// ErrAddressBanned is thrown when the address has been banned and therefore cannot be used +type ErrAddressBanned struct { + Addr *p2p.NetAddress +} + +func (err ErrAddressBanned) Error() string { + return fmt.Sprintf("Address: %v is currently banned", err.Addr) +} + +// ErrUnsolicitedList is thrown when a peer provides a list of addresses that have not been asked for. 
+var ErrUnsolicitedList = errors.New("unsolicited pexAddrsMessage") diff --git a/p2p/pex/known_address.go b/p2p/pex/known_address.go index af40d6ff0..e98a9e97e 100644 --- a/p2p/pex/known_address.go +++ b/p2p/pex/known_address.go @@ -16,6 +16,7 @@ type knownAddress struct { BucketType byte `json:"bucket_type"` LastAttempt time.Time `json:"last_attempt"` LastSuccess time.Time `json:"last_success"` + LastBanTime time.Time `json:"last_ban_time"` } func newKnownAddress(addr *p2p.NetAddress, src *p2p.NetAddress) *knownAddress { @@ -54,6 +55,16 @@ func (ka *knownAddress) markGood() { ka.LastSuccess = now } +func (ka *knownAddress) ban(banTime time.Duration) { + if ka.LastBanTime.Before(time.Now().Add(banTime)) { + ka.LastBanTime = time.Now().Add(banTime) + } +} + +func (ka *knownAddress) isBanned() bool { + return ka.LastBanTime.After(time.Now()) +} + func (ka *knownAddress) addBucketRef(bucketIdx int) int { for _, bucket := range ka.Buckets { if bucket == bucketIdx { diff --git a/p2p/pex/pex_reactor.go b/p2p/pex/pex_reactor.go index 6dc38a921..467ae008b 100644 --- a/p2p/pex/pex_reactor.go +++ b/p2p/pex/pex_reactor.go @@ -8,7 +8,8 @@ import ( "github.com/pkg/errors" - amino "github.com/tendermint/go-amino" + "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/cmap" tmmath "github.com/tendermint/tendermint/libs/math" "github.com/tendermint/tendermint/libs/rand" @@ -50,6 +51,9 @@ const ( // Especially in the beginning, node should have more trusted peers than // untrusted. biasToSelectNewPeers = 30 // 70 to select good peers + + // if a peer is marked bad, it will be banned for at least this time period + defaultBanTime = 24 * time.Hour ) type errMaxAttemptsToDial struct { @@ -272,6 +276,7 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { // Check we're not receiving requests too frequently. 
if err := r.receiveRequest(src); err != nil { r.Switch.StopPeerForError(src, err) + r.book.MarkBad(src.SocketAddr(), defaultBanTime) return } r.SendAddrs(src, r.book.GetSelection()) @@ -281,6 +286,9 @@ func (r *Reactor) Receive(chID byte, src Peer, msgBytes []byte) { // If we asked for addresses, add them to the book if err := r.ReceiveAddrs(msg.Addrs, src); err != nil { r.Switch.StopPeerForError(src, err) + if err == ErrUnsolicitedList { + r.book.MarkBad(src.SocketAddr(), defaultBanTime) + } return } default: @@ -340,7 +348,7 @@ func (r *Reactor) RequestAddrs(p Peer) { func (r *Reactor) ReceiveAddrs(addrs []*p2p.NetAddress, src Peer) error { id := string(src.ID()) if !r.requestsSent.Has(id) { - return errors.New("unsolicited pexAddrsMessage") + return ErrUnsolicitedList } r.requestsSent.Delete(id) @@ -494,6 +502,12 @@ func (r *Reactor) ensurePeers() { } if r.book.NeedMoreAddrs() { + // Check if banned nodes can be reinstated + r.book.ReinstateBadPeers() + } + + if r.book.NeedMoreAddrs() { + // 1) Pick a random peer and ask for more. peers := r.Switch.Peers().List() peersCount := len(peers) @@ -525,18 +539,14 @@ func (r *Reactor) dialAttemptsInfo(addr *p2p.NetAddress) (attempts int, lastDial func (r *Reactor) dialPeer(addr *p2p.NetAddress) error { attempts, lastDialed := r.dialAttemptsInfo(addr) if !r.Switch.IsPeerPersistent(addr) && attempts > maxAttemptsToDial { - // TODO(melekes): have a blacklist in the addrbook with peers whom we've - // failed to connect to. Then we can clean up attemptsToDial, which acts as - // a blacklist currently. 
- // https://github.com/tendermint/tendermint/issues/3572 - r.book.MarkBad(addr) + r.book.MarkBad(addr, defaultBanTime) return errMaxAttemptsToDial{} } // exponential backoff if it's not our first attempt to dial given address if attempts > 0 { - jitterSeconds := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) - backoffDuration := jitterSeconds + ((1 << uint(attempts)) * time.Second) + jitterSecond := time.Duration(tmrand.Float64() * float64(time.Second)) // 1s == (1e9 ns) + backoffDuration := jitterSecond + ((1 << uint(attempts)) * time.Second) backoffDuration = r.maxBackoffDurationForPeer(addr, backoffDuration) sinceLastDialed := time.Since(lastDialed) if sinceLastDialed < backoffDuration { @@ -741,7 +751,7 @@ func markAddrInBookBasedOnErr(addr *p2p.NetAddress, book AddrBook, err error) { // TODO: detect more "bad peer" scenarios switch err.(type) { case p2p.ErrSwitchAuthenticationFailure: - book.MarkBad(addr) + book.MarkBad(addr, defaultBanTime) default: book.MarkAttempt(addr) } diff --git a/p2p/pex/pex_reactor_test.go b/p2p/pex/pex_reactor_test.go index 4cddf6352..04f4149eb 100644 --- a/p2p/pex/pex_reactor_test.go +++ b/p2p/pex/pex_reactor_test.go @@ -144,8 +144,11 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { sw.SetAddrBook(book) peer := mock.NewPeer(nil) + peerAddr := peer.SocketAddr() p2p.AddPeerToSwitchPeerSet(sw, peer) assert.True(t, sw.Peers().Has(peer.ID())) + book.AddAddress(peerAddr, peerAddr) + require.True(t, book.HasAddress(peerAddr)) id := string(peer.ID()) msg := cdc.MustMarshalBinaryBare(&pexRequestMessage{}) @@ -164,6 +167,7 @@ func TestPEXReactorRequestMessageAbuse(t *testing.T) { r.Receive(PexChannel, peer, msg) assert.False(t, r.lastReceivedRequests.Has(id)) assert.False(t, sw.Peers().Has(peer.ID())) + assert.True(t, book.IsBanned(peerAddr)) } func TestPEXReactorAddrsMessageAbuse(t *testing.T) { @@ -192,9 +196,10 @@ func TestPEXReactorAddrsMessageAbuse(t *testing.T) { assert.False(t, 
r.requestsSent.Has(id)) assert.True(t, sw.Peers().Has(peer.ID())) - // receiving more addrs causes a disconnect + // receiving more unsolicited addrs causes a disconnect and ban r.Receive(PexChannel, peer, msg) assert.False(t, sw.Peers().Has(peer.ID())) + assert.True(t, book.IsBanned(peer.SocketAddr())) } func TestCheckSeeds(t *testing.T) { @@ -373,9 +378,7 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { sw := createSwitchAndAddReactors(pexR) sw.SetAddrBook(book) - err = sw.Start() - require.NoError(t, err) - defer sw.Stop() + // No need to start sw since crawlPeers is called manually here. peer := mock.NewPeer(nil) addr := peer.SocketAddr() @@ -384,9 +387,11 @@ func TestPEXReactorDialsPeerUpToMaxAttemptsInSeedMode(t *testing.T) { require.NoError(t, err) assert.True(t, book.HasAddress(addr)) + // imitate maxAttemptsToDial reached pexR.attemptsToDial.Store(addr.DialString(), _attemptsToDial{maxAttemptsToDial + 1, time.Now()}) pexR.crawlPeers([]*p2p.NetAddress{addr}) + assert.False(t, book.HasAddress(addr)) } diff --git a/p2p/trust/store.go b/p2p/trust/store.go index 502c88f90..166b26b1c 100644 --- a/p2p/trust/store.go +++ b/p2p/trust/store.go @@ -9,8 +9,9 @@ import ( "sync" "time" - "github.com/tendermint/tendermint/libs/service" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/service" ) const defaultStorePeriodicSaveInterval = 1 * time.Minute diff --git a/p2p/trust/store_test.go b/p2p/trust/store_test.go index 76dbaac1c..1cd83916c 100644 --- a/p2p/trust/store_test.go +++ b/p2p/trust/store_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/stretchr/testify/assert" - "github.com/tendermint/tendermint/libs/log" dbm "github.com/tendermint/tm-db" + + "github.com/tendermint/tendermint/libs/log" ) func TestTrustMetricStoreSaveLoad(t *testing.T) { diff --git a/privval/codec.go b/privval/codec.go index 9edcc7741..d1f2eafa2 100644 --- a/privval/codec.go +++ b/privval/codec.go @@ -2,6 +2,7 @@ 
package privval import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/privval/file.go b/privval/file.go index 7f22b21c4..ae825bde5 100644 --- a/privval/file.go +++ b/privval/file.go @@ -239,8 +239,8 @@ func (pv *FilePV) GetAddress() types.Address { // GetPubKey returns the public key of the validator. // Implements PrivValidator. -func (pv *FilePV) GetPubKey() crypto.PubKey { - return pv.Key.PubKey +func (pv *FilePV) GetPubKey() (crypto.PubKey, error) { + return pv.Key.PubKey, nil } // SignVote signs a canonical representation of the vote, along with the diff --git a/privval/file_deprecated_test.go b/privval/file_deprecated_test.go index ca0e1e508..f850c23f1 100644 --- a/privval/file_deprecated_test.go +++ b/privval/file_deprecated_test.go @@ -61,7 +61,9 @@ func assertEqualPV(t *testing.T, oldPV *privval.OldFilePV, newPV *privval.FilePV assert.Equal(t, oldPV.Address, newPV.Key.Address) assert.Equal(t, oldPV.Address, newPV.GetAddress()) assert.Equal(t, oldPV.PubKey, newPV.Key.PubKey) - assert.Equal(t, oldPV.PubKey, newPV.GetPubKey()) + npv, err := newPV.GetPubKey() + require.NoError(t, err) + assert.Equal(t, oldPV.PubKey, npv) assert.Equal(t, oldPV.PrivKey, newPV.Key.PrivKey) assert.Equal(t, oldPV.LastHeight, newPV.LastSignState.Height) diff --git a/privval/file_test.go b/privval/file_test.go index 06418bfc8..71f273ddf 100644 --- a/privval/file_test.go +++ b/privval/file_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" diff --git a/privval/messages.go b/privval/messages.go index 1fdc7f939..a5f218a1f 100644 --- a/privval/messages.go +++ b/privval/messages.go @@ -1,7 +1,8 @@ package privval import ( - "github.com/tendermint/go-amino" + amino 
"github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/vrf" "github.com/tendermint/tendermint/types" diff --git a/privval/signer_client.go b/privval/signer_client.go index 5bd2d95a8..593cbeddf 100644 --- a/privval/signer_client.go +++ b/privval/signer_client.go @@ -1,6 +1,7 @@ package privval import ( + "fmt" "time" "github.com/pkg/errors" @@ -67,25 +68,26 @@ func (sc *SignerClient) Ping() error { } // GetPubKey retrieves a public key from a remote signer -func (sc *SignerClient) GetPubKey() crypto.PubKey { +// returns an error if client is not able to provide the key +func (sc *SignerClient) GetPubKey() (crypto.PubKey, error) { response, err := sc.endpoint.SendRequest(&PubKeyRequest{}) if err != nil { sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", err) - return nil + return nil, errors.Wrap(err, "send") } pubKeyResp, ok := response.(*PubKeyResponse) if !ok { sc.endpoint.Logger.Error("SignerClient::GetPubKey", "err", "response != PubKeyResponse") - return nil + return nil, errors.Errorf("unexpected response type %T", response) } if pubKeyResp.Error != nil { sc.endpoint.Logger.Error("failed to get private validator's public key", "err", pubKeyResp.Error) - return nil + return nil, fmt.Errorf("remote error: %w", pubKeyResp.Error) } - return pubKeyResp.PubKey + return pubKeyResp.PubKey, nil } // SignVote requests a remote signer to sign a vote diff --git a/privval/signer_client_test.go b/privval/signer_client_test.go index ffd1e5ef2..47207949b 100644 --- a/privval/signer_client_test.go +++ b/privval/signer_client_test.go @@ -77,15 +77,20 @@ func TestSignerGetPubKey(t *testing.T) { defer tc.signerServer.Stop() defer tc.signerClient.Close() - pubKey := tc.signerClient.GetPubKey() - expectedPubKey := tc.mockPV.GetPubKey() + pubKey, err := tc.signerClient.GetPubKey() + require.NoError(t, err) + expectedPubKey, err := tc.mockPV.GetPubKey() + require.NoError(t, err) assert.Equal(t, 
expectedPubKey, pubKey) - addr := tc.signerClient.GetPubKey().Address() - expectedAddr := tc.mockPV.GetPubKey().Address() + pubKey, err = tc.signerClient.GetPubKey() + require.NoError(t, err) + expectedpk, err := tc.mockPV.GetPubKey() + require.NoError(t, err) + expectedAddr := expectedpk.Address() - assert.Equal(t, expectedAddr, addr) + assert.Equal(t, expectedAddr, pubKey.Address()) } } @@ -115,9 +120,11 @@ func TestSignerGenerateVRFProof(t *testing.T) { require.Nil(t, err) _, err = vrf.ProofToHash(proof) require.Nil(t, err) - pubKey, ok := tc.signerClient.GetPubKey().(ed25519.PubKeyEd25519) + pubKey, err2 := tc.signerClient.GetPubKey() + require.NoError(t, err2) + pubKeyEd25519, ok := pubKey.(ed25519.PubKeyEd25519) require.True(t, ok) - expected, err := vrf.Verify(pubKey, proof, message) + expected, err := vrf.Verify(pubKeyEd25519, proof, message) require.Nil(t, err) assert.True(t, expected) } diff --git a/privval/signer_requestHandler.go b/privval/signer_requestHandler.go index b6cf4c01e..df49343c1 100644 --- a/privval/signer_requestHandler.go +++ b/privval/signer_requestHandler.go @@ -17,9 +17,13 @@ func DefaultValidationRequestHandler( switch r := req.(type) { case *PubKeyRequest: - var p crypto.PubKey - p = privVal.GetPubKey() - res = &PubKeyResponse{p, nil} + var pubKey crypto.PubKey + pubKey, err = privVal.GetPubKey() + if err != nil { + res = &PubKeyResponse{nil, &RemoteSignerError{0, err.Error()}} + } else { + res = &PubKeyResponse{pubKey, nil} + } case *SignVoteRequest: err = privVal.SignVote(chainID, r.Vote) diff --git a/privval/socket_dialers.go b/privval/socket_dialers.go index 1945e7728..f9e5c7879 100644 --- a/privval/socket_dialers.go +++ b/privval/socket_dialers.go @@ -5,6 +5,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto" tmnet "github.com/tendermint/tendermint/libs/net" p2pconn "github.com/tendermint/tendermint/p2p/conn" diff --git a/rpc/client/codec.go b/rpc/client/codec.go index 
ef1a00ec4..2dc0f6319 100644 --- a/rpc/client/codec.go +++ b/rpc/client/codec.go @@ -2,6 +2,7 @@ package client import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/event_test.go b/rpc/client/event_test.go index 5c9d902fd..a25b6ebb2 100644 --- a/rpc/client/event_test.go +++ b/rpc/client/event_test.go @@ -1,11 +1,13 @@ package client_test import ( + "context" "fmt" "reflect" "testing" "time" + "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" abci "github.com/tendermint/tendermint/abci/types" @@ -135,3 +137,21 @@ func testTxEventsSent(t *testing.T, broadcastMethod string) { func TestClientsResubscribe(t *testing.T) { // TODO(melekes) } + +func TestHTTPReturnsErrorIfClientIsNotRunning(t *testing.T) { + c := getHTTPClient() + + // on Subscribe + _, err := c.Subscribe(context.Background(), "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeader).String()) + assert.Error(t, err) + + // on Unsubscribe + err = c.Unsubscribe(context.Background(), "TestHeaderEvents", + types.QueryForEvent(types.EventNewBlockHeader).String()) + assert.Error(t, err) + + // on UnsubscribeAll + err = c.UnsubscribeAll(context.Background(), "TestHeaderEvents") + assert.Error(t, err) +} diff --git a/rpc/client/examples_test.go b/rpc/client/examples_test.go index a543de70d..5d87a3a98 100644 --- a/rpc/client/examples_test.go +++ b/rpc/client/examples_test.go @@ -3,9 +3,10 @@ package client_test import ( "bytes" "fmt" + "log" "github.com/tendermint/tendermint/abci/example/kvstore" - "github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctest "github.com/tendermint/tendermint/rpc/test" ) @@ -18,9 +19,9 @@ func ExampleHTTP_simple() { // Create our RPC client rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err 
:= rpchttp.New(rpcAddr, "/websocket") if err != nil { - panic(err) + log.Fatal(err) } // Create a transaction @@ -29,28 +30,28 @@ func ExampleHTTP_simple() { tx := append(k, append([]byte("="), v...)...) // Broadcast the transaction and wait for it to commit (rather use - // c.BroadcastTxSync though in production) + // c.BroadcastTxSync though in production). bres, err := c.BroadcastTxCommit(tx) if err != nil { - panic(err) + log.Fatal(err) } if bres.CheckTx.IsErr() || bres.DeliverTx.IsErr() { - panic("BroadcastTxCommit transaction failed") + log.Fatal("BroadcastTxCommit transaction failed") } // Now try to fetch the value for the key qres, err := c.ABCIQuery("/key", k) if err != nil { - panic(err) + log.Fatal(err) } if qres.Response.IsErr() { - panic("ABCIQuery failed") + log.Fatal("ABCIQuery failed") } if !bytes.Equal(qres.Response.Key, k) { - panic("returned key does not match queried key") + log.Fatal("returned key does not match queried key") } if !bytes.Equal(qres.Response.Value, v) { - panic("returned value does not match sent value") + log.Fatal("returned value does not match sent value") } fmt.Println("Sent tx :", string(tx)) @@ -71,9 +72,9 @@ func ExampleHTTP_batching() { // Create our RPC client rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { - panic(err) + log.Fatal(err) } // Create our two transactions @@ -92,28 +93,30 @@ func ExampleHTTP_batching() { // Queue up our transactions for _, tx := range txs { + // Broadcast the transaction and wait for it to commit (rather use + // c.BroadcastTxSync though in production). 
if _, err := batch.BroadcastTxCommit(tx); err != nil { - panic(err) + log.Fatal(err) } } // Send the batch of 2 transactions if _, err := batch.Send(); err != nil { - panic(err) + log.Fatal(err) } // Now let's query for the original results as a batch keys := [][]byte{k1, k2} for _, key := range keys { if _, err := batch.ABCIQuery("/key", key); err != nil { - panic(err) + log.Fatal(err) } } // Send the 2 queries and keep the results results, err := batch.Send() if err != nil { - panic(err) + log.Fatal(err) } // Each result in the returned list is the deserialized result of each @@ -121,7 +124,7 @@ func ExampleHTTP_batching() { for _, result := range results { qr, ok := result.(*ctypes.ResultABCIQuery) if !ok { - panic("invalid result type from ABCIQuery request") + log.Fatal("invalid result type from ABCIQuery request") } fmt.Println(string(qr.Response.Key), "=", string(qr.Response.Value)) } diff --git a/rpc/client/helpers.go b/rpc/client/helpers.go index 756ba2818..0e54ec03b 100644 --- a/rpc/client/helpers.go +++ b/rpc/client/helpers.go @@ -5,6 +5,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/client/helpers_test.go b/rpc/client/helpers_test.go index 8b843fcdb..3b78dfe5f 100644 --- a/rpc/client/helpers_test.go +++ b/rpc/client/helpers_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/client/mock" ctypes "github.com/tendermint/tendermint/rpc/core/types" diff --git a/rpc/client/httpclient.go b/rpc/client/http/http.go similarity index 84% rename from rpc/client/httpclient.go rename to rpc/client/http/http.go index 98875c91e..09214d66b 100644 --- a/rpc/client/httpclient.go +++ b/rpc/client/http/http.go @@ -1,4 +1,4 @@ -package client +package http import ( "context" @@ -15,8 +15,9 @@ import ( "github.com/tendermint/tendermint/libs/log" tmpubsub 
"github.com/tendermint/tendermint/libs/pubsub" "github.com/tendermint/tendermint/libs/service" + rpcclient "github.com/tendermint/tendermint/rpc/client" ctypes "github.com/tendermint/tendermint/rpc/core/types" - rpcclient "github.com/tendermint/tendermint/rpc/lib/client" + rpcclientlib "github.com/tendermint/tendermint/rpc/lib/client" "github.com/tendermint/tendermint/types" ) @@ -37,10 +38,31 @@ indefinitely until successful. Request batching is available for JSON RPC requests over HTTP, which conforms to the JSON RPC specification (https://www.jsonrpc.org/specification#batch). See the example for more details. + +Example: + + c, err := New("http://192.168.1.10:26657", "/websocket") + if err != nil { + // handle error + } + + // call Start/Stop if you're subscribing to events + err = c.Start() + if err != nil { + // handle error + } + defer c.Stop() + + res, err := c.Status() + if err != nil { + // handle error + } + + // handle result */ type HTTP struct { remote string - rpc *rpcclient.JSONRPCClient + rpc *rpcclientlib.JSONRPCClient *baseRPCClient *WSEvents @@ -57,7 +79,7 @@ type HTTP struct { // batch, but ordering of transactions in the batch cannot be guaranteed in such // an example. type BatchHTTP struct { - rpcBatch *rpcclient.JSONRPCRequestBatch + rpcBatch *rpcclientlib.JSONRPCRequestBatch *baseRPCClient } @@ -65,17 +87,17 @@ type BatchHTTP struct { // non-batch) must conform. Acts as an additional code-level sanity check to // make sure the implementations stay coherent. type rpcClient interface { - ABCIClient - HistoryClient - NetworkClient - SignClient - StatusClient + rpcclient.ABCIClient + rpcclient.HistoryClient + rpcclient.NetworkClient + rpcclient.SignClient + rpcclient.StatusClient } // baseRPCClient implements the basic RPC method logic without the actual // underlying RPC call functionality, which is provided by `caller`. 
type baseRPCClient struct { - caller rpcclient.JSONRPCCaller + caller rpcclientlib.JSONRPCCaller } var _ rpcClient = (*HTTP)(nil) @@ -85,35 +107,35 @@ var _ rpcClient = (*baseRPCClient)(nil) //----------------------------------------------------------------------------- // HTTP -// NewHTTP takes a remote endpoint in the form ://: and +// New takes a remote endpoint in the form ://: and // the websocket path (which always seems to be "/websocket") // An error is returned on invalid remote. The function panics when remote is nil. -func NewHTTP(remote, wsEndpoint string) (*HTTP, error) { - httpClient, err := rpcclient.DefaultHTTPClient(remote) +func New(remote, wsEndpoint string) (*HTTP, error) { + httpClient, err := rpcclientlib.DefaultHTTPClient(remote) if err != nil { return nil, err } - return NewHTTPWithClient(remote, wsEndpoint, httpClient) + return NewWithClient(remote, wsEndpoint, httpClient) } // Create timeout enabled http client -func NewHTTPWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { - httpClient, err := rpcclient.DefaultHTTPClient(remote) +func NewWithTimeout(remote, wsEndpoint string, timeout uint) (*HTTP, error) { + httpClient, err := rpcclientlib.DefaultHTTPClient(remote) if err != nil { return nil, err } httpClient.Timeout = time.Duration(timeout) * time.Second - return NewHTTPWithClient(remote, wsEndpoint, httpClient) + return NewWithClient(remote, wsEndpoint, httpClient) } -// NewHTTPWithClient allows for setting a custom http client (See NewHTTP). +// NewWithClient allows for setting a custom http client (See New). // An error is returned on invalid remote. The function panics when remote is nil. 
-func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error) { +func NewWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, error) { if client == nil { panic("nil http.Client provided") } - rc, err := rpcclient.NewJSONRPCClientWithHTTPClient(remote, client) + rc, err := rpcclientlib.NewJSONRPCClientWithHTTPClient(remote, client) if err != nil { return nil, err } @@ -121,17 +143,22 @@ func NewHTTPWithClient(remote, wsEndpoint string, client *http.Client) (*HTTP, e ctypes.RegisterAmino(cdc) rc.SetCodec(cdc) + wsEvents, err := newWSEvents(cdc, remote, wsEndpoint) + if err != nil { + return nil, err + } + httpClient := &HTTP{ rpc: rc, remote: remote, baseRPCClient: &baseRPCClient{caller: rc}, - WSEvents: newWSEvents(cdc, remote, wsEndpoint), + WSEvents: wsEvents, } return httpClient, nil } -var _ Client = (*HTTP)(nil) +var _ rpcclient.Client = (*HTTP)(nil) // SetLogger sets a logger. func (c *HTTP) SetLogger(l log.Logger) { @@ -198,13 +225,13 @@ func (c *baseRPCClient) ABCIInfo() (*ctypes.ResultABCIInfo, error) { } func (c *baseRPCClient) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) + return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) } func (c *baseRPCClient) ABCIQueryWithOptions( path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { result := new(ctypes.ResultABCIQuery) _, err := c.caller.Call("abci_query", map[string]interface{}{"path": path, "data": data, "height": opts.Height, "prove": opts.Prove}, @@ -381,15 +408,15 @@ func (c *baseRPCClient) TxSearch(query string, prove bool, page, perPage int, or return result, nil } -func (c *baseRPCClient) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - result := new(ctypes.ResultValidators) - _, err := 
c.caller.Call("validators", map[string]interface{}{ +func (c *baseRPCClient) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + result := new(ctypes.ResultVoters) + _, err := c.caller.Call("voters", map[string]interface{}{ "height": height, "page": page, "per_page": perPage, }, result) if err != nil { - return nil, errors.Wrap(err, "Validators") + return nil, errors.Wrap(err, "Voters") } return result, nil } @@ -406,48 +433,51 @@ func (c *baseRPCClient) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroa //----------------------------------------------------------------------------- // WSEvents +var errNotRunning = errors.New("client is not running. Use .Start() method to start") + +// WSEvents is a wrapper around WSClient, which implements EventsClient. type WSEvents struct { service.BaseService cdc *amino.Codec remote string endpoint string - ws *rpcclient.WSClient + ws *rpcclientlib.WSClient - mtx sync.RWMutex - // query -> chan - subscriptions map[string]chan ctypes.ResultEvent + mtx sync.RWMutex + subscriptions map[string]chan ctypes.ResultEvent // query -> chan } -func newWSEvents(cdc *amino.Codec, remote, endpoint string) *WSEvents { - wsEvents := &WSEvents{ +func newWSEvents(cdc *amino.Codec, remote, endpoint string) (*WSEvents, error) { + w := &WSEvents{ cdc: cdc, endpoint: endpoint, remote: remote, subscriptions: make(map[string]chan ctypes.ResultEvent), } + w.BaseService = *service.NewBaseService(nil, "WSEvents", w) - wsEvents.BaseService = *service.NewBaseService(nil, "WSEvents", wsEvents) - return wsEvents -} - -// OnStart implements service.Service by starting WSClient and event loop. 
-func (w *WSEvents) OnStart() (err error) { - w.ws, err = rpcclient.NewWSClient(w.remote, w.endpoint, rpcclient.OnReconnect(func() { + var err error + w.ws, err = rpcclientlib.NewWSClient(w.remote, w.endpoint, rpcclientlib.OnReconnect(func() { // resubscribe immediately w.redoSubscriptionsAfter(0 * time.Second) })) if err != nil { - return err + return nil, err } w.ws.SetCodec(w.cdc) w.ws.SetLogger(w.Logger) - err = w.ws.Start() - if err != nil { + return w, nil +} + +// OnStart implements service.Service by starting WSClient and event loop. +func (w *WSEvents) OnStart() error { + if err := w.ws.Start(); err != nil { return err } go w.eventListener() + return nil } @@ -459,10 +489,17 @@ func (w *WSEvents) OnStop() { // Subscribe implements EventsClient by using WSClient to subscribe given // subscriber to query. By default, returns a channel with cap=1. Error is // returned if it fails to subscribe. -// Channel is never closed to prevent clients from seeing an erroneus event. +// +// Channel is never closed to prevent clients from seeing an erroneous event. +// +// It returns an error if WSEvents is not running. func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, outCapacity ...int) (out <-chan ctypes.ResultEvent, err error) { + if !w.IsRunning() { + return nil, errNotRunning + } + if err := w.ws.Subscribe(ctx, query); err != nil { return nil, err } @@ -484,7 +521,13 @@ func (w *WSEvents) Subscribe(ctx context.Context, subscriber, query string, // Unsubscribe implements EventsClient by using WSClient to unsubscribe given // subscriber from query. +// +// It returns an error if WSEvents is not running. 
func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) error { + if !w.IsRunning() { + return errNotRunning + } + if err := w.ws.Unsubscribe(ctx, query); err != nil { return err } @@ -501,7 +544,13 @@ func (w *WSEvents) Unsubscribe(ctx context.Context, subscriber, query string) er // UnsubscribeAll implements EventsClient by using WSClient to unsubscribe // given subscriber from all the queries. +// +// It returns an error if WSEvents is not running. func (w *WSEvents) UnsubscribeAll(ctx context.Context, subscriber string) error { + if !w.IsRunning() { + return errNotRunning + } + if err := w.ws.UnsubscribeAll(ctx); err != nil { return err } diff --git a/rpc/client/interface.go b/rpc/client/interface.go index 408d803c8..a443b6026 100644 --- a/rpc/client/interface.go +++ b/rpc/client/interface.go @@ -67,7 +67,7 @@ type SignClient interface { Block(height *int64) (*ctypes.ResultBlock, error) BlockResults(height *int64) (*ctypes.ResultBlockResults, error) Commit(height *int64) (*ctypes.ResultCommit, error) - Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) + Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) TxSearch(query string, prove bool, page, perPage int, orderBy string) (*ctypes.ResultTxSearch, error) } diff --git a/rpc/client/localclient.go b/rpc/client/local/local.go similarity index 94% rename from rpc/client/localclient.go rename to rpc/client/local/local.go index e6b0eb937..95ad1d3b1 100644 --- a/rpc/client/localclient.go +++ b/rpc/client/local/local.go @@ -1,4 +1,4 @@ -package client +package local import ( "context" @@ -11,6 +11,7 @@ import ( tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" nm "github.com/tendermint/tendermint/node" + rpcclient "github.com/tendermint/tendermint/rpc/client" "github.com/tendermint/tendermint/rpc/core" ctypes 
"github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" @@ -49,7 +50,7 @@ type Local struct { // you can only have one node per process. So make sure test cases // don't run in parallel, or try to simulate an entire network in // one process... -func NewLocal(node *nm.Node) *Local { +func New(node *nm.Node) *Local { node.ConfigureRPC() return &Local{ EventBus: node.EventBus(), @@ -58,7 +59,7 @@ func NewLocal(node *nm.Node) *Local { } } -var _ Client = (*Local)(nil) +var _ rpcclient.Client = (*Local)(nil) // SetLogger allows to set a logger on the client. func (c *Local) SetLogger(l log.Logger) { @@ -74,13 +75,13 @@ func (c *Local) ABCIInfo() (*ctypes.ResultABCIInfo, error) { } func (c *Local) ABCIQuery(path string, data bytes.HexBytes) (*ctypes.ResultABCIQuery, error) { - return c.ABCIQueryWithOptions(path, data, DefaultABCIQueryOptions) + return c.ABCIQueryWithOptions(path, data, rpcclient.DefaultABCIQueryOptions) } func (c *Local) ABCIQueryWithOptions( path string, data bytes.HexBytes, - opts ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { + opts rpcclient.ABCIQueryOptions) (*ctypes.ResultABCIQuery, error) { return core.ABCIQuery(c.ctx, path, data, opts.Height, opts.Prove) } @@ -152,8 +153,8 @@ func (c *Local) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(c.ctx, height) } -func (c *Local) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return core.Validators(c.ctx, height, page, perPage) +func (c *Local) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return core.Voters(c.ctx, height, page, perPage) } func (c *Local) Tx(hash []byte, prove bool) (*ctypes.ResultTx, error) { diff --git a/rpc/client/mock/abci.go b/rpc/client/mock/abci.go index d1f84f2b1..ebee8b4e8 100644 --- a/rpc/client/mock/abci.go +++ b/rpc/client/mock/abci.go @@ -63,7 +63,13 @@ func (a ABCIApp) BroadcastTxAsync(tx types.Tx) 
(*ctypes.ResultBroadcastTx, error if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{ + Code: c.Code, + Data: c.Data, + Log: c.Log, + Codespace: c.Codespace, + Hash: tx.Hash(), + }, nil } func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) { @@ -72,7 +78,13 @@ func (a ABCIApp) BroadcastTxSync(tx types.Tx) (*ctypes.ResultBroadcastTx, error) if !c.IsErr() { go func() { a.App.DeliverTx(abci.RequestDeliverTx{Tx: tx}) }() // nolint: errcheck } - return &ctypes.ResultBroadcastTx{Code: c.Code, Data: c.Data, Log: c.Log, Hash: tx.Hash()}, nil + return &ctypes.ResultBroadcastTx{ + Code: c.Code, + Data: c.Data, + Log: c.Log, + Codespace: c.Codespace, + Hash: tx.Hash(), + }, nil } // ABCIMock will send all abci related request to the named app, diff --git a/rpc/client/mock/client.go b/rpc/client/mock/client.go index 869d7b3e9..6fc895a23 100644 --- a/rpc/client/mock/client.go +++ b/rpc/client/mock/client.go @@ -154,8 +154,8 @@ func (c Client) Commit(height *int64) (*ctypes.ResultCommit, error) { return core.Commit(&rpctypes.Context{}, height) } -func (c Client) Validators(height *int64, page, perPage int) (*ctypes.ResultValidators, error) { - return core.Validators(&rpctypes.Context{}, height, page, perPage) +func (c Client) Voters(height *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return core.Voters(&rpctypes.Context{}, height, page, perPage) } func (c Client) BroadcastEvidence(ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { diff --git a/rpc/client/rpc_test.go b/rpc/client/rpc_test.go index 5e83675e3..d7431d38a 100644 --- a/rpc/client/rpc_test.go +++ b/rpc/client/rpc_test.go @@ -23,15 +23,17 @@ import ( mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/privval" 
"github.com/tendermint/tendermint/rpc/client" + rpchttp "github.com/tendermint/tendermint/rpc/client/http" + rpclocal "github.com/tendermint/tendermint/rpc/client/local" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpcclient "github.com/tendermint/tendermint/rpc/lib/client" rpctest "github.com/tendermint/tendermint/rpc/test" "github.com/tendermint/tendermint/types" ) -func getHTTPClient() *client.HTTP { +func getHTTPClient() *rpchttp.HTTP { rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTP(rpcAddr, "/websocket") + c, err := rpchttp.New(rpcAddr, "/websocket") if err != nil { panic(err) } @@ -39,9 +41,9 @@ func getHTTPClient() *client.HTTP { return c } -func getHTTPClientWithTimeout(timeout uint) *client.HTTP { +func getHTTPClientWithTimeout(timeout uint) *rpchttp.HTTP { rpcAddr := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTPWithTimeout(rpcAddr, "/websocket", timeout) + c, err := rpchttp.NewWithTimeout(rpcAddr, "/websocket", timeout) if err != nil { panic(err) } @@ -49,8 +51,8 @@ func getHTTPClientWithTimeout(timeout uint) *client.HTTP { return c } -func getLocalClient() *client.Local { - return client.NewLocal(node) +func getLocalClient() *rpclocal.Local { + return rpclocal.New(node) } // GetClients returns a slice of clients for table-driven tests @@ -63,7 +65,7 @@ func GetClients() []client.Client { func TestNilCustomHTTPClient(t *testing.T) { require.Panics(t, func() { - _, _ = client.NewHTTPWithClient("http://example.com", "/websocket", nil) + _, _ = rpchttp.NewWithClient("http://example.com", "/websocket", nil) }) require.Panics(t, func() { _, _ = rpcclient.NewJSONRPCClientWithHTTPClient("http://example.com", nil) @@ -72,7 +74,7 @@ func TestNilCustomHTTPClient(t *testing.T) { func TestCustomHTTPClient(t *testing.T) { remote := rpctest.GetConfig().RPC.ListenAddress - c, err := client.NewHTTPWithClient(remote, "/websocket", http.DefaultClient) + c, err := rpchttp.NewWithClient(remote, 
"/websocket", http.DefaultClient) require.Nil(t, err) status, err := c.Status() require.NoError(t, err) @@ -171,13 +173,13 @@ func TestGenesisAndValidators(t *testing.T) { gval := gen.Genesis.Validators[0] // get the current validators - vals, err := c.Validators(nil, 0, 0) + vals, err := c.Voters(nil, 0, 0) require.Nil(t, err, "%d: %+v", i, err) - require.Equal(t, 1, len(vals.Validators)) - val := vals.Validators[0] + require.Equal(t, 1, len(vals.Voters)) + val := vals.Voters[0] // make sure the current set is also the genesis set - assert.Equal(t, gval.Power, val.VotingPower) + assert.Equal(t, gval.Power, val.StakingPower) assert.Equal(t, gval.PubKey, val.PubKey) } } @@ -701,7 +703,7 @@ func TestBatchedJSONRPCCalls(t *testing.T) { testBatchedJSONRPCCalls(t, c) } -func testBatchedJSONRPCCalls(t *testing.T, c *client.HTTP) { +func testBatchedJSONRPCCalls(t *testing.T, c *rpchttp.HTTP) { k1, v1, tx1 := MakeTxKV() k2, v2, tx2 := MakeTxKV() diff --git a/rpc/core/blocks.go b/rpc/core/blocks.go index e340d4dfb..1d608534a 100644 --- a/rpc/core/blocks.go +++ b/rpc/core/blocks.go @@ -17,7 +17,7 @@ func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes. // maximum 20 block metas const limit int64 = 20 var err error - minHeight, maxHeight, err = filterMinMax(blockStore.Height(), minHeight, maxHeight, limit) + minHeight, maxHeight, err = filterMinMax(blockStore.Base(), blockStore.Height(), minHeight, maxHeight, limit) if err != nil { return nil, err } @@ -34,11 +34,10 @@ func BlockchainInfo(ctx *rpctypes.Context, minHeight, maxHeight int64) (*ctypes. BlockMetas: blockMetas}, nil } -// error if either min or max are negative or min < max -// if 0, use 1 for min, latest block height for max +// error if either min or max are negative or min > max +// if 0, use blockstore base for min, latest block height for max // enforce limit. 
-// error if min > max -func filterMinMax(height, min, max, limit int64) (int64, int64, error) { +func filterMinMax(base, height, min, max, limit int64) (int64, int64, error) { // filter negatives if min < 0 || max < 0 { return min, max, fmt.Errorf("heights must be non-negative") @@ -55,6 +54,9 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // limit max to the height max = tmmath.MinInt64(height, max) + // limit min to the base + min = tmmath.MaxInt64(base, min) + // limit min to within `limit` of max // so the total number of blocks returned will be `limit` min = tmmath.MaxInt64(min, max-limit+1) @@ -69,8 +71,7 @@ func filterMinMax(height, min, max, limit int64) (int64, int64, error) { // If no height is provided, it will fetch the latest block. // More: https://docs.tendermint.com/master/rpc/#/Info/block func Block(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlock, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -99,8 +100,7 @@ func BlockByHash(ctx *rpctypes.Context, hash []byte) (*ctypes.ResultBlock, error // If no height is provided, it will fetch the commit for the latest block. 
// More: https://docs.tendermint.com/master/rpc/#/Info/commit func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -113,7 +113,7 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // If the next block has not been committed yet, // use a non-canonical commit - if height == storeHeight { + if height == blockStore.Height() { commit := blockStore.LoadSeenCommit(height) return ctypes.NewResultCommit(&header, commit, false), nil } @@ -131,8 +131,7 @@ func Commit(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultCommit, erro // getBlock(h).Txs[5] // More: https://docs.tendermint.com/master/rpc/#/Info/block_results func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockResults, error) { - storeHeight := blockStore.Height() - height, err := getHeight(storeHeight, heightPtr) + height, err := getHeight(blockStore.Base(), blockStore.Height(), heightPtr) if err != nil { return nil, err } @@ -152,7 +151,7 @@ func BlockResults(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultBlockR }, nil } -func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { +func getHeight(currentBase int64, currentHeight int64, heightPtr *int64) (int64, error) { if heightPtr != nil { height := *heightPtr if height <= 0 { @@ -161,6 +160,10 @@ func getHeight(currentHeight int64, heightPtr *int64) (int64, error) { if height > currentHeight { return 0, fmt.Errorf("height must be less than or equal to the current blockchain height") } + if height < currentBase { + return 0, fmt.Errorf("height %v is not available, blocks pruned at height %v", + height, currentBase) + } return height, nil } return currentHeight, nil diff --git a/rpc/core/blocks_test.go b/rpc/core/blocks_test.go index 
d537f6c3a..c0561647f 100644 --- a/rpc/core/blocks_test.go +++ b/rpc/core/blocks_test.go @@ -7,53 +7,58 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestBlockchainInfo(t *testing.T) { cases := []struct { min, max int64 - height int64 + base, height int64 limit int64 resultLength int64 wantErr bool }{ // min > max - {0, 0, 0, 10, 0, true}, // min set to 1 - {0, 1, 0, 10, 0, true}, // max set to height (0) - {0, 0, 1, 10, 1, false}, // max set to height (1) - {2, 0, 1, 10, 0, true}, // max set to height (1) - {2, 1, 5, 10, 0, true}, + {0, 0, 0, 0, 10, 0, true}, // min set to 1 + {0, 1, 0, 0, 10, 0, true}, // max set to height (0) + {0, 0, 0, 1, 10, 1, false}, // max set to height (1) + {2, 0, 0, 1, 10, 0, true}, // max set to height (1) + {2, 1, 0, 5, 10, 0, true}, // negative - {1, 10, 14, 10, 10, false}, // control - {-1, 10, 14, 10, 0, true}, - {1, -10, 14, 10, 0, true}, - {-9223372036854775808, -9223372036854775788, 100, 20, 0, true}, + {1, 10, 0, 14, 10, 10, false}, // control + {-1, 10, 0, 14, 10, 0, true}, + {1, -10, 0, 14, 10, 0, true}, + {-9223372036854775808, -9223372036854775788, 0, 100, 20, 0, true}, + + // check base + {1, 1, 1, 1, 1, 1, false}, + {2, 5, 3, 5, 5, 3, false}, // check limit and height - {1, 1, 1, 10, 1, false}, - {1, 1, 5, 10, 1, false}, - {2, 2, 5, 10, 1, false}, - {1, 2, 5, 10, 2, false}, - {1, 5, 1, 10, 1, false}, - {1, 5, 10, 10, 5, false}, - {1, 15, 10, 10, 10, false}, - {1, 15, 15, 10, 10, false}, - {1, 15, 15, 20, 15, false}, - {1, 20, 15, 20, 15, false}, - {1, 20, 20, 20, 20, false}, + {1, 1, 0, 1, 10, 1, false}, + {1, 1, 0, 5, 10, 1, 
false}, + {2, 2, 0, 5, 10, 1, false}, + {1, 2, 0, 5, 10, 2, false}, + {1, 5, 0, 1, 10, 1, false}, + {1, 5, 0, 10, 10, 5, false}, + {1, 15, 0, 10, 10, 10, false}, + {1, 15, 0, 15, 10, 10, false}, + {1, 15, 0, 15, 20, 15, false}, + {1, 20, 0, 15, 20, 15, false}, + {1, 20, 0, 20, 20, 20, false}, } for i, c := range cases { caseString := fmt.Sprintf("test %d failed", i) - min, max, err := filterMinMax(c.height, c.min, c.max, c.limit) + min, max, err := filterMinMax(c.base, c.height, c.min, c.max, c.limit) if c.wantErr { require.Error(t, err, caseString) } else { @@ -111,12 +116,15 @@ type mockBlockStore struct { height int64 } +func (mockBlockStore) Base() int64 { return 1 } func (store mockBlockStore) Height() int64 { return store.height } +func (store mockBlockStore) Size() int64 { return store.height } func (mockBlockStore) LoadBlockMeta(height int64) *types.BlockMeta { return nil } func (mockBlockStore) LoadBlock(height int64) *types.Block { return nil } func (mockBlockStore) LoadBlockByHash(hash []byte) *types.Block { return nil } func (mockBlockStore) LoadBlockPart(height int64, index int) *types.Part { return nil } func (mockBlockStore) LoadBlockCommit(height int64) *types.Commit { return nil } func (mockBlockStore) LoadSeenCommit(height int64) *types.Commit { return nil } +func (mockBlockStore) PruneBlocks(height int64) (uint64, error) { return 0, nil } func (mockBlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) { } diff --git a/rpc/core/consensus.go b/rpc/core/consensus.go index a2a619ea5..a28de0358 100644 --- a/rpc/core/consensus.go +++ b/rpc/core/consensus.go @@ -7,28 +7,37 @@ import ( rpctypes "github.com/tendermint/tendermint/rpc/lib/types" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" + dbm "github.com/tendermint/tm-db" ) // Validators gets the validator set at the given block height. -// If no height is provided, it will fetch the current validator set. 
-// Note the validators are sorted by their address - this is the canonical -// order for the validators in the set as used in computing their Merkle root. +// +// If no height is provided, it will fetch the current validator set. Note the +// voters are sorted by their address - this is the canonical order for the +// voters in the set as used in computing their Merkle root. +// // More: https://docs.tendermint.com/master/rpc/#/Info/validators -func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ctypes.ResultValidators, error) { +func Voters(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ctypes.ResultVoters, error) { + return voters(ctx, heightPtr, page, perPage, sm.LoadVoters) +} + +func voters(ctx *rpctypes.Context, heightPtr *int64, page, perPage int, + loadFunc func(db dbm.DB, height int64, voterParams *types.VoterParams) (*types.VoterSet, error)) ( + *ctypes.ResultVoters, error) { // The latest validator that we know is the // NextValidator of the last block. 
height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) + height, err := getHeight(blockStore.Base(), height, heightPtr) if err != nil { return nil, err } - validators, err := sm.LoadValidators(stateDB, height) + voters, err := loadFunc(stateDB, height, consensusState.GetState().VoterParams) if err != nil { return nil, err } - totalCount := len(validators.Validators) + totalCount := len(voters.Voters) perPage = validatePerPage(perPage) page, err = validatePage(page, perPage, totalCount) if err != nil { @@ -37,11 +46,11 @@ func Validators(ctx *rpctypes.Context, heightPtr *int64, page, perPage int) (*ct skipCount := validateSkipCount(page, perPage) - v := validators.Validators[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] + v := voters.Voters[skipCount : skipCount+tmmath.MinInt(perPage, totalCount-skipCount)] - return &ctypes.ResultValidators{ + return &ctypes.ResultVoters{ BlockHeight: height, - Validators: v}, nil + Voters: v}, nil } // DumpConsensusState dumps consensus state. 
@@ -91,7 +100,7 @@ func ConsensusState(ctx *rpctypes.Context) (*ctypes.ResultConsensusState, error) // More: https://docs.tendermint.com/master/rpc/#/Info/consensus_params func ConsensusParams(ctx *rpctypes.Context, heightPtr *int64) (*ctypes.ResultConsensusParams, error) { height := consensusState.GetState().LastBlockHeight + 1 - height, err := getHeight(height, heightPtr) + height, err := getHeight(blockStore.Base(), height, heightPtr) if err != nil { return nil, err } diff --git a/rpc/core/doc.go b/rpc/core/doc.go index a51a02982..c1971b855 100644 --- a/rpc/core/doc.go +++ b/rpc/core/doc.go @@ -93,7 +93,7 @@ Available endpoints: /unconfirmed_txs /unsafe_flush_mempool /unsafe_stop_cpu_profiler -/validators +/voters Endpoints that require arguments: /abci_query?path=_&data=_&prove=_ diff --git a/rpc/core/evidence.go b/rpc/core/evidence.go index 4ae138e7e..7d7ac2ec7 100644 --- a/rpc/core/evidence.go +++ b/rpc/core/evidence.go @@ -1,6 +1,7 @@ package core import ( + "github.com/tendermint/tendermint/evidence" ctypes "github.com/tendermint/tendermint/rpc/core/types" rpctypes "github.com/tendermint/tendermint/rpc/lib/types" "github.com/tendermint/tendermint/types" @@ -10,8 +11,8 @@ import ( // More: https://docs.tendermint.com/master/rpc/#/Info/broadcast_evidence func BroadcastEvidence(ctx *rpctypes.Context, ev types.Evidence) (*ctypes.ResultBroadcastEvidence, error) { err := evidencePool.AddEvidence(ev) - if err != nil { - return nil, err + if _, ok := err.(evidence.ErrEvidenceAlreadyStored); err == nil || ok { + return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil } - return &ctypes.ResultBroadcastEvidence{Hash: ev.Hash()}, nil + return nil, err } diff --git a/rpc/core/mempool.go b/rpc/core/mempool.go index 28b73ab33..0e6ce7a2c 100644 --- a/rpc/core/mempool.go +++ b/rpc/core/mempool.go @@ -43,10 +43,11 @@ func BroadcastTxSync(ctx *rpctypes.Context, tx types.Tx) (*ctypes.ResultBroadcas res := <-resCh r := res.GetCheckTx() return 
&ctypes.ResultBroadcastTx{ - Code: r.Code, - Data: r.Data, - Log: r.Log, - Hash: tx.Hash(), + Code: r.Code, + Data: r.Data, + Log: r.Log, + Codespace: r.Codespace, + Hash: tx.Hash(), }, nil } diff --git a/rpc/core/pipe.go b/rpc/core/pipe.go index 532493451..4fb3b9b13 100644 --- a/rpc/core/pipe.go +++ b/rpc/core/pipe.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/crypto" @@ -14,7 +16,6 @@ import ( sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( diff --git a/rpc/core/routes.go b/rpc/core/routes.go index aa0403f87..bc7b9b8c1 100644 --- a/rpc/core/routes.go +++ b/rpc/core/routes.go @@ -25,7 +25,7 @@ var Routes = map[string]*rpc.RPCFunc{ "commit": rpc.NewRPCFunc(Commit, "height"), "tx": rpc.NewRPCFunc(Tx, "hash,prove"), "tx_search": rpc.NewRPCFunc(TxSearch, "query,prove,page,per_page,order_by"), - "validators": rpc.NewRPCFunc(Validators, "height,page,per_page"), + "voters": rpc.NewRPCFunc(Voters, "height,page,per_page"), "dump_consensus_state": rpc.NewRPCFunc(DumpConsensusState, ""), "consensus_state": rpc.NewRPCFunc(ConsensusState, ""), "consensus_params": rpc.NewRPCFunc(ConsensusParams, "height"), diff --git a/rpc/core/status.go b/rpc/core/status.go index e6438009a..b745f61e1 100644 --- a/rpc/core/status.go +++ b/rpc/core/status.go @@ -16,6 +16,20 @@ import ( // hash, app hash, block height and time. 
// More: https://docs.tendermint.com/master/rpc/#/Info/status func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { + var ( + earliestBlockMeta *types.BlockMeta + earliestBlockHash tmbytes.HexBytes + earliestAppHash tmbytes.HexBytes + earliestBlockTimeNano int64 + ) + earliestBlockHeight := blockStore.Base() + earliestBlockMeta = blockStore.LoadBlockMeta(earliestBlockHeight) + if earliestBlockMeta != nil { + earliestAppHash = earliestBlockMeta.Header.AppHash + earliestBlockHash = earliestBlockMeta.BlockID.Hash + earliestBlockTimeNano = earliestBlockMeta.Header.Time.UnixNano() + } + var latestHeight int64 if consensusReactor.FastSync() { latestHeight = blockStore.Height() @@ -36,26 +50,29 @@ func Status(ctx *rpctypes.Context) (*ctypes.ResultStatus, error) { latestBlockTimeNano = latestBlockMeta.Header.Time.UnixNano() } - latestBlockTime := time.Unix(0, latestBlockTimeNano) + var stakingPower int64 - var votingPower int64 if val := validatorAtHeight(latestHeight); val != nil { - votingPower = val.VotingPower + stakingPower = val.StakingPower } result := &ctypes.ResultStatus{ NodeInfo: p2pTransport.NodeInfo().(p2p.DefaultNodeInfo), SyncInfo: ctypes.SyncInfo{ - LatestBlockHash: latestBlockHash, - LatestAppHash: latestAppHash, - LatestBlockHeight: latestHeight, - LatestBlockTime: latestBlockTime, - CatchingUp: consensusReactor.FastSync(), + LatestBlockHash: latestBlockHash, + LatestAppHash: latestAppHash, + LatestBlockHeight: latestHeight, + LatestBlockTime: time.Unix(0, latestBlockTimeNano), + EarliestBlockHash: earliestBlockHash, + EarliestAppHash: earliestAppHash, + EarliestBlockHeight: earliestBlockHeight, + EarliestBlockTime: time.Unix(0, earliestBlockTimeNano), + CatchingUp: consensusReactor.FastSync(), }, ValidatorInfo: ctypes.ValidatorInfo{ - Address: pubKey.Address(), - PubKey: pubKey, - VotingPower: votingPower, + Address: pubKey.Address(), + PubKey: pubKey, + StakingPower: stakingPower, }, } @@ -66,6 +83,7 @@ func validatorAtHeight(h int64) 
*types.Validator { privValAddress := pubKey.Address() // If we're still at height h, search in the current validator set. + // ValidatorOrVoter: validator lastBlockHeight, vals := consensusState.GetValidators() if lastBlockHeight == h { for _, val := range vals { @@ -77,6 +95,7 @@ func validatorAtHeight(h int64) *types.Validator { // If we've moved to the next height, retrieve the validator set from DB. if lastBlockHeight > h { + // ValidatorOrVoter: validator vals, err := sm.LoadValidators(stateDB, h) if err != nil { return nil // should not happen diff --git a/rpc/core/types/codec.go b/rpc/core/types/codec.go index 82543ff6e..8e0b5303f 100644 --- a/rpc/core/types/codec.go +++ b/rpc/core/types/codec.go @@ -2,6 +2,7 @@ package coretypes import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/rpc/core/types/responses.go b/rpc/core/types/responses.go index 9aee485e9..2249f75bd 100644 --- a/rpc/core/types/responses.go +++ b/rpc/core/types/responses.go @@ -65,14 +65,20 @@ type SyncInfo struct { LatestAppHash bytes.HexBytes `json:"latest_app_hash"` LatestBlockHeight int64 `json:"latest_block_height"` LatestBlockTime time.Time `json:"latest_block_time"` - CatchingUp bool `json:"catching_up"` + + EarliestBlockHash bytes.HexBytes `json:"earliest_block_hash"` + EarliestAppHash bytes.HexBytes `json:"earliest_app_hash"` + EarliestBlockHeight int64 `json:"earliest_block_height"` + EarliestBlockTime time.Time `json:"earliest_block_time"` + + CatchingUp bool `json:"catching_up"` } // Info about the node's validator type ValidatorInfo struct { - Address bytes.HexBytes `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` + Address bytes.HexBytes `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + StakingPower int64 `json:"staking_power"` } // Node Status @@ -116,10 +122,14 @@ type Peer struct { RemoteIP string `json:"remote_ip"` } -// Validators for a height -type 
ResultValidators struct { +// Voters for a height +type ResultVoters struct { BlockHeight int64 `json:"block_height"` - Validators []*types.Validator `json:"validators"` + Voters []*types.Validator `json:"voters"` + // Count of actual validators in this result + Count int `json:"count"` + // Total number of validators + Total int `json:"total"` } // ConsensusParams for given height @@ -148,9 +158,10 @@ type ResultConsensusState struct { // CheckTx result type ResultBroadcastTx struct { - Code uint32 `json:"code"` - Data bytes.HexBytes `json:"data"` - Log string `json:"log"` + Code uint32 `json:"code"` + Data bytes.HexBytes `json:"data"` + Log string `json:"log"` + Codespace string `json:"codespace"` Hash bytes.HexBytes `json:"hash"` } diff --git a/rpc/lib/client/integration_test.go b/rpc/lib/client/integration_test.go index 393783c51..5fee3752b 100644 --- a/rpc/lib/client/integration_test.go +++ b/rpc/lib/client/integration_test.go @@ -14,6 +14,7 @@ import ( "time" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" ) diff --git a/rpc/lib/client/ws_client.go b/rpc/lib/client/ws_client.go index 4d8a58b8e..ddddc97cf 100644 --- a/rpc/lib/client/ws_client.go +++ b/rpc/lib/client/ws_client.go @@ -14,6 +14,7 @@ import ( metrics "github.com/rcrowley/go-metrics" amino "github.com/tendermint/go-amino" + tmrand "github.com/tendermint/tendermint/libs/rand" "github.com/tendermint/tendermint/libs/service" types "github.com/tendermint/tendermint/rpc/lib/types" diff --git a/rpc/lib/client/ws_client_test.go b/rpc/lib/client/ws_client_test.go index 33a65dcbe..a4f033867 100644 --- a/rpc/lib/client/ws_client_test.go +++ b/rpc/lib/client/ws_client_test.go @@ -11,6 +11,7 @@ import ( "github.com/gorilla/websocket" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" diff --git a/rpc/lib/rpc_test.go b/rpc/lib/rpc_test.go index 
5b95666a7..aef795d3e 100644 --- a/rpc/lib/rpc_test.go +++ b/rpc/lib/rpc_test.go @@ -17,6 +17,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + tmbytes "github.com/tendermint/tendermint/libs/bytes" "github.com/tendermint/tendermint/libs/log" tmrand "github.com/tendermint/tendermint/libs/rand" diff --git a/rpc/lib/server/http_json_handler_test.go b/rpc/lib/server/http_json_handler_test.go index e4ae2f8bf..ef1fcc9f5 100644 --- a/rpc/lib/server/http_json_handler_test.go +++ b/rpc/lib/server/http_json_handler_test.go @@ -13,6 +13,7 @@ import ( "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/log" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/lib/server/parse_test.go b/rpc/lib/server/parse_test.go index 3780861e4..09a3d18ee 100644 --- a/rpc/lib/server/parse_test.go +++ b/rpc/lib/server/parse_test.go @@ -9,6 +9,7 @@ import ( "github.com/stretchr/testify/assert" amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/libs/bytes" types "github.com/tendermint/tendermint/rpc/lib/types" ) diff --git a/rpc/swagger/swagger.yaml b/rpc/swagger/swagger.yaml index 40b6e0169..03e439d1f 100644 --- a/rpc/swagger/swagger.yaml +++ b/rpc/swagger/swagger.yaml @@ -52,7 +52,7 @@ paths: required: true schema: type: string - example: "456" + example: "0x343536" description: The transaction responses: 200: @@ -96,7 +96,7 @@ paths: required: true schema: type: string - example: "123" + example: "0x313233" description: The transaction responses: 200: @@ -138,7 +138,7 @@ paths: required: true schema: type: string - example: "785" + example: "0x373835" description: The transaction responses: 200: @@ -253,9 +253,10 @@ paths: https://godoc.org/github.com/tendermint/tendermint/libs/pubsub/query. 
```go + import rpchttp "github.com/tendermint/rpc/client/http" import "github.com/tendermint/tendermint/types" - client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") err := client.Start() if err != nil { handle error @@ -281,7 +282,7 @@ paths: required: true schema: type: string - example: tm.event = 'Tx' AND tx.height = 5 + example: tm.event = 'Tx' AND tx.height = 5 description: | query is a string, which has a form: "condition AND condition ..." (no OR at the moment). condition has a form: "key operation operand". key is a string with @@ -309,7 +310,7 @@ paths: operationId: unsubscribe description: | ```go - client := client.NewHTTP("tcp:0.0.0.0:26657", "/websocket") + client := rpchttp.New("tcp:0.0.0.0:26657", "/websocket") err := client.Start() if err != nil { handle error @@ -327,7 +328,7 @@ paths: required: true schema: type: string - example: tm.event = 'Tx' AND tx.height = 5 + example: tm.event = 'Tx' AND tx.height = 5 description: | query is a string, which has a form: "condition AND condition ..." (no OR at the moment). condition has a form: "key operation operand". key is a string with @@ -497,13 +498,13 @@ paths: description: Minimum block height to return schema: type: number - example: 1 + example: 1 - in: query name: maxHeight description: Maximum block height to return schema: type: number - example: 2 + example: 2 tags: - Info description: | @@ -531,7 +532,7 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 description: height to return. If no height is provided, it will fetch the latest block. 
tags: - Info @@ -561,7 +562,7 @@ paths: required: true schema: type: string - example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" tags: - Info description: | @@ -590,7 +591,7 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 tags: - Info description: | @@ -619,7 +620,7 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 tags: - Info description: | @@ -637,10 +638,10 @@ paths: application/json: schema: $ref: "#/components/schemas/ErrorResponse" - /validators: + /voters: get: - summary: Get validator set at a specified height - operationId: validators + summary: Get voter set at a specified height + operationId: voters parameters: - in: query name: height @@ -648,7 +649,7 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 - in: query name: page description: "Page number (1-based)" @@ -656,26 +657,26 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 - in: query name: per_page description: "Number of entries per page (max: 100)" required: false schema: type: number - example: 30 default: 30 + example: 30 tags: - Info description: | - Get Validators. + Get Voters. responses: 200: description: Commit results. 
content: application/json: schema: - $ref: "#/components/schemas/ValidatorsResponse" + $ref: "#/components/schemas/VotersResponse" 500: description: Error content: @@ -756,7 +757,7 @@ paths: schema: type: number default: 0 - example: 1 + example: 1 tags: - Info description: | @@ -784,7 +785,7 @@ paths: description: Maximum number of unconfirmed transactions to return schema: type: number - example: 1 + example: 1 tags: - Info description: | @@ -834,7 +835,7 @@ paths: required: true schema: type: string - example: "tx.height=1000" + example: "\"tx.height=1000\"" - in: query name: prove description: Include proofs of the transactions inclusion in the block @@ -842,7 +843,7 @@ paths: schema: type: boolean default: false - example: true + example: true - in: query name: page description: "Page number (1-based)" @@ -850,7 +851,7 @@ paths: schema: type: number default: 1 - example: 1 + example: 1 - in: query name: per_page description: "Number of entries per page (max: 100)" @@ -858,7 +859,7 @@ paths: schema: type: number default: 30 - example: 30 + example: 30 - in: query name: order_by description: Order in which transactions are sorted ("asc" or "desc"), by height & index. If empty, default sorting will be still applied. 
@@ -866,7 +867,7 @@ paths: schema: type: string default: "asc" - example: "asc" + example: "\"asc\"" tags: - Info description: | @@ -895,15 +896,15 @@ paths: required: true schema: type: string - example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" + example: "0xD70952032620CC4E2737EB8AC379806359D8E0B17B0488F627997A0B043ABDED" - in: query name: prove description: Include proofs of the transactions inclusion in the block required: false schema: type: boolean - example: true default: false + example: true tags: - Info description: | @@ -953,30 +954,30 @@ paths: required: true schema: type: string - example: "/a/b/c" + example: "/a/b/c" - in: query name: data description: Data required: true schema: type: string - example: "IHAVENOIDEA" + example: "IHAVENOIDEA" - in: query name: height description: Height (0 means latest) required: false schema: type: number - example: 1 default: 0 + example: 1 - in: query name: prove description: Include proofs of the transactions inclusion in the block required: false schema: type: boolean - example: true default: false + example: true tags: - ABCI description: | @@ -1005,7 +1006,7 @@ paths: required: true schema: type: string - example: "JSON_EVIDENCE_Amino_encoded" + example: "JSON_EVIDENCE_Amino_encoded" tags: - Info description: | @@ -1122,6 +1123,18 @@ components: latest_block_time: type: string example: "2019-08-01T11:52:22.818762194Z" + earliest_block_hash: + type: string + example: "790BA84C3545FCCC49A5C629CEE6EA58A6E875C3862175BDC11EE7AF54703501" + earliest_app_hash: + type: string + example: "C9AEBB441B787D9F1D846DE51F3826F4FD386108B59B08239653ABF59455C3F8" + earliest_block_height: + type: string + example: "1262196" + earliest_block_time: + type: string + example: "2019-08-01T11:52:22.818762194Z" catching_up: type: boolean example: false @@ -1297,8 +1310,8 @@ components: - "last_block_id" - "last_commit_hash" - "data_hash" - - "validators_hash" - - "next_validators_hash" + - "voters_hash" + - 
"next_voters_hash" - "consensus_hash" - "app_hash" - "last_results_hash" @@ -1334,10 +1347,10 @@ components: data_hash: type: string example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: + voters_hash: type: string example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: + next_voters_hash: type: string example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" consensus_hash: @@ -1727,8 +1740,8 @@ components: - "last_block_id" - "last_commit_hash" - "data_hash" - - "validators_hash" - - "next_validators_hash" + - "voters_hash" + - "next_voters_hash" - "consensus_hash" - "app_hash" - "last_results_hash" @@ -1783,10 +1796,10 @@ components: data_hash: type: "string" example: "970886F99E77ED0D60DA8FCE0447C2676E59F2F77302B0C4AA10E1D02F18EF73" - validators_hash: + voters_hash: type: "string" example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" - next_validators_hash: + next_voters_hash: type: "string" example: "D658BFD100CA8025CFD3BECFE86194322731D387286FBD26E059115FD5F2BCA0" consensus_hash: @@ -1862,7 +1875,7 @@ components: type: "boolean" example: true type: "object" - ValidatorsResponse: + VotersResponse: type: object required: - "jsonrpc" @@ -1878,12 +1891,12 @@ components: result: required: - "block_height" - - "validators" + - "voters" properties: block_height: type: "string" example: "55" - validators: + voters: type: "array" items: type: "object" @@ -1909,6 +1922,12 @@ components: proposer_priority: type: "string" example: "13769415" + count: + type: "number" + example: 1 + total: + type: "number" + example: 25 type: "object" GenesisResponse: type: object @@ -1969,11 +1988,15 @@ components: evidence: type: "object" required: - - "max_age" + - "max_age_num_blocks" + - "max_age_duration" properties: - max_age: + max_age_num_blocks: type: "string" example: "100000" + max_age_duration: + type: "string" + example: "172800000000000" validator: 
type: "object" required: @@ -2043,7 +2066,8 @@ components: - "step" - "start_time" - "commit_time" - - "validators" + - "voters" + - "proposer" - "proposal" - "proposal_block" - "proposal_block_parts" @@ -2056,7 +2080,7 @@ components: - "votes" - "commit_round" - "last_commit" - - "last_validators" + - "last_voters" - "triggered_timeout_precommit" properties: height: @@ -2074,12 +2098,11 @@ components: commit_time: type: "string" example: "2019-08-05T11:28:44.064658805Z" - validators: + voters: required: - - "validators" - - "proposer" + - "voters" properties: - validators: + voters: type: "array" items: type: "object" @@ -2105,35 +2128,35 @@ components: proposer_priority: type: "string" example: "-11896414" - proposer: + type: "object" + proposer: + required: + - "address" + - "pub_key" + - "voting_power" + - "proposer_priority" + properties: + address: + type: "string" + example: "708FDDCE121CDADA502F2B0252FEF13FDAA31E50" + pub_key: required: - - "address" - - "pub_key" - - "voting_power" - - "proposer_priority" + - "type" + - "value" properties: - address: - type: "string" - example: "708FDDCE121CDADA502F2B0252FEF13FDAA31E50" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "VNMNfw7mrQBSpEvCtA9ykOe6BoR00RM9b/a9v3vXZhY=" - type: "object" - voting_power: + type: type: "string" - example: "295360" - proposer_priority: + example: "tendermint/PubKeyEd25519" + value: type: "string" - example: "-88886833" + example: "VNMNfw7mrQBSpEvCtA9ykOe6BoR00RM9b/a9v3vXZhY=" type: "object" + voting_power: + type: "string" + example: "295360" + proposer_priority: + type: "string" + example: "-88886833" type: "object" locked_round: type: "string" @@ -2193,12 +2216,11 @@ components: properties: {} type: "object" type: "object" - last_validators: + last_voters: required: - - "validators" - - "proposer" + - "voters" properties: - validators: + voters: type: "array" items: type: 
"object" @@ -2224,35 +2246,6 @@ components: proposer_priority: type: "string" example: "-12136141" - proposer: - required: - - "address" - - "pub_key" - - "voting_power" - - "proposer_priority" - properties: - address: - type: "string" - example: "B00A6323737F321EB0B8D59C6FD497A14B60938A" - pub_key: - required: - - "type" - - "value" - properties: - type: - type: "string" - example: "tendermint/PubKeyEd25519" - value: - type: "string" - example: "cOQZvh/h9ZioSeUMZB/1Vy1Xo5x2sjrVjlE/qHnYifM=" - type: "object" - voting_power: - type: "string" - example: "8590153" - proposer_priority: - type: "string" - example: "-79515145" - type: "object" type: "object" triggered_timeout_precommit: type: "boolean" @@ -2492,11 +2485,15 @@ components: evidence: type: "object" required: - - "max_age" + - "max_age_num_blocks" + - "max_age_duration" properties: - max_age: + max_age_num_blocks: type: "string" example: "100000" + max_age_duration: + type: "string" + example: "172800000000000" validator: type: "object" required: @@ -2947,6 +2944,9 @@ components: log: type: "string" example: "" + codespace: + type: "string" + example: "ibc" hash: type: "string" example: "0D33F2F03A5234F38706E43004489E061AC40A2E" diff --git a/scripts/json2wal/main.go b/scripts/json2wal/main.go index 1d1f6256c..9f6cdb2b6 100644 --- a/scripts/json2wal/main.go +++ b/scripts/json2wal/main.go @@ -15,6 +15,7 @@ import ( "strings" amino "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/types" ) diff --git a/scripts/prepare_dredd_test.sh b/scripts/prepare_dredd_test.sh new file mode 100755 index 000000000..3d9e9bf6d --- /dev/null +++ b/scripts/prepare_dredd_test.sh @@ -0,0 +1,17 @@ +#!/usr/bin/env bash + + echo "Install compile tools" + apt-get update + apt-get install -y make wget git + + VERSION=1.14.2 + OS=linux + GOLANG_FULL=go${VERSION}.${OS}-amd64.tar.gz + + echo "Install golang" + wget https://dl.google.com/go/${GOLANG_FULL} + tar -C 
/usr/local -xzf ${GOLANG_FULL} + export PATH=$PATH:/usr/local/go/bin + + echo "Build contract-tests" + make build-contract-tests-hooks diff --git a/scripts/privValUpgrade_test.go b/scripts/privValUpgrade_test.go index d62d4ceee..287c4fc50 100644 --- a/scripts/privValUpgrade_test.go +++ b/scripts/privValUpgrade_test.go @@ -8,6 +8,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/privval" ) @@ -100,7 +101,9 @@ func TestLoadAndUpgrade(t *testing.T) { assert.Equal(t, oldPV.Address, upgradedPV.Key.Address) assert.Equal(t, oldPV.Address, upgradedPV.GetAddress()) assert.Equal(t, oldPV.PubKey, upgradedPV.Key.PubKey) - assert.Equal(t, oldPV.PubKey, upgradedPV.GetPubKey()) + upv, err := upgradedPV.GetPubKey() + require.NoError(t, err) + assert.Equal(t, oldPV.PubKey, upv) assert.Equal(t, oldPV.PrivKey, upgradedPV.Key.PrivKey) assert.Equal(t, oldPV.LastHeight, upgradedPV.LastSignState.Height) diff --git a/scripts/wal2json/main.go b/scripts/wal2json/main.go index 48195eead..181f40c75 100644 --- a/scripts/wal2json/main.go +++ b/scripts/wal2json/main.go @@ -13,6 +13,7 @@ import ( "os" amino "github.com/tendermint/go-amino" + cs "github.com/tendermint/tendermint/consensus" "github.com/tendermint/tendermint/types" ) diff --git a/state/codec.go b/state/codec.go index abbec6e39..df2c15545 100644 --- a/state/codec.go +++ b/state/codec.go @@ -2,6 +2,7 @@ package state import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/state/errors.go b/state/errors.go index cd4cd7824..6e0cdfa47 100644 --- a/state/errors.go +++ b/state/errors.go @@ -21,6 +21,11 @@ type ( AppHeight int64 } + ErrAppBlockHeightTooLow struct { + AppHeight int64 + StoreBase int64 + } + ErrLastStateMismatch struct { Height int64 Core []byte @@ -46,12 +51,12 @@ type ( ) func (e ErrUnknownBlock) Error() string { - return fmt.Sprintf("Could not find 
block #%d", e.Height) + return fmt.Sprintf("could not find block #%d", e.Height) } func (e ErrBlockHashMismatch) Error() string { return fmt.Sprintf( - "App block hash (%X) does not match core block hash (%X) for height %d", + "app block hash (%X) does not match core block hash (%X) for height %d", e.AppHash, e.CoreHash, e.Height, @@ -59,11 +64,16 @@ func (e ErrBlockHashMismatch) Error() string { } func (e ErrAppBlockHeightTooHigh) Error() string { - return fmt.Sprintf("App block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) + return fmt.Sprintf("app block height (%d) is higher than core (%d)", e.AppHeight, e.CoreHeight) } + +func (e ErrAppBlockHeightTooLow) Error() string { + return fmt.Sprintf("app block height (%d) is too far below block store base (%d)", e.AppHeight, e.StoreBase) +} + func (e ErrLastStateMismatch) Error() string { return fmt.Sprintf( - "Latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", + "latest tendermint block (%d) LastAppHash (%X) does not match app's AppHash (%X)", e.Height, e.Core, e.App, @@ -72,20 +82,20 @@ func (e ErrLastStateMismatch) Error() string { func (e ErrStateMismatch) Error() string { return fmt.Sprintf( - "State after replay does not match saved state. Got ----\n%v\nExpected ----\n%v\n", + "state after replay does not match saved state. 
Got ----\n%v\nExpected ----\n%v\n", e.Got, e.Expected, ) } func (e ErrNoValSetForHeight) Error() string { - return fmt.Sprintf("Could not find validator set for height #%d", e.Height) + return fmt.Sprintf("could not find validator set for height #%d", e.Height) } func (e ErrNoConsensusParamsForHeight) Error() string { - return fmt.Sprintf("Could not find consensus params for height #%d", e.Height) + return fmt.Sprintf("could not find consensus params for height #%d", e.Height) } func (e ErrNoABCIResponsesForHeight) Error() string { - return fmt.Sprintf("Could not find results for height #%d", e.Height) + return fmt.Sprintf("could not find results for height #%d", e.Height) } diff --git a/state/execution.go b/state/execution.go index 170beaa7f..df2f13632 100644 --- a/state/execution.go +++ b/state/execution.go @@ -4,6 +4,8 @@ import ( "fmt" "time" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/vrf" "github.com/tendermint/tendermint/libs/fail" @@ -11,7 +13,6 @@ import ( mempl "github.com/tendermint/tendermint/mempool" "github.com/tendermint/tendermint/proxy" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) //----------------------------------------------------------------------------- @@ -105,7 +106,7 @@ func (blockExec *BlockExecutor) CreateProposalBlock( evidence := blockExec.evpool.PendingEvidence(maxNumEvidence) // Fetch a limited amount of valid txs - maxDataBytes := types.MaxDataBytes(maxBytes, state.Validators.Size(), len(evidence)) + maxDataBytes := types.MaxDataBytes(maxBytes, state.Voters.Size(), len(evidence)) txs := blockExec.mempool.ReapMaxBytesMaxGas(maxDataBytes, maxGas) return state.MakeBlock(height, txs, commit, evidence, proposerAddr, round, proof) @@ -121,23 +122,27 @@ func (blockExec *BlockExecutor) ValidateBlock(state State, round int, block *typ // ApplyBlock validates the block against the state, executes it 
against the app, // fires the relevant events, commits the app, and saves the new state and responses. +// It returns the new state and the block height to retain (pruning older blocks). // It's the only function that needs to be called // from outside this package to process and commit an entire block. // It takes a blockID to avoid recomputing the parts hash. -func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, block *types.Block) (State, error) { +func (blockExec *BlockExecutor) ApplyBlock( + state State, blockID types.BlockID, block *types.Block, +) (State, int64, error) { // When doing ApplyBlock, we don't need to check whether the block.Round is same to current round, // so we just put block.Round for the current round parameter if err := blockExec.ValidateBlock(state, block.Round, block); err != nil { - return state, ErrInvalidBlock(err) + return state, 0, ErrInvalidBlock(err) } startTime := time.Now().UnixNano() - abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, blockExec.db) + abciResponses, err := execBlockOnProxyApp(blockExec.logger, blockExec.proxyApp, block, blockExec.db, + state.VoterParams) endTime := time.Now().UnixNano() blockExec.metrics.BlockProcessingTime.Observe(float64(endTime-startTime) / 1000000) if err != nil { - return state, ErrProxyAppConn(err) + return state, 0, ErrProxyAppConn(err) } fail.Fail() // XXX @@ -151,11 +156,11 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b abciValUpdates := abciResponses.EndBlock.ValidatorUpdates err = validateValidatorUpdates(abciValUpdates, state.ConsensusParams.Validator) if err != nil { - return state, fmt.Errorf("error in validator updates: %v", err) + return state, 0, fmt.Errorf("error in validator updates: %v", err) } validatorUpdates, err := types.PB2TM.ValidatorUpdates(abciValUpdates) if err != nil { - return state, err + return state, 0, err } if len(validatorUpdates) > 0 { 
blockExec.logger.Info("Updates to validators", "updates", types.ValidatorListString(validatorUpdates)) @@ -164,13 +169,13 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b // Update the state with the block and responses. state, err = updateState(state, blockID, &block.Header, abciResponses, validatorUpdates) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, 0, fmt.Errorf("commit failed for application: %v", err) } // Lock mempool, commit app state, update mempoool. - appHash, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) + appHash, retainHeight, err := blockExec.Commit(state, block, abciResponses.DeliverTxs) if err != nil { - return state, fmt.Errorf("commit failed for application: %v", err) + return state, 0, fmt.Errorf("commit failed for application: %v", err) } // Update evpool with the block and state. @@ -188,12 +193,12 @@ func (blockExec *BlockExecutor) ApplyBlock(state State, blockID types.BlockID, b // NOTE: if we crash between Commit and Save, events wont be fired during replay fireEvents(blockExec.logger, blockExec.eventBus, block, abciResponses, validatorUpdates) - return state, nil + return state, retainHeight, nil } // Commit locks the mempool, runs the ABCI Commit message, and updates the // mempool. -// It returns the result of calling abci.Commit (the AppHash), and an error. +// It returns the result of calling abci.Commit (the AppHash) and the height to retain (if any). // The Mempool must be locked during commit and update because state is // typically reset on Commit and old txs must be replayed against committed // state before new txs are run in the mempool, lest they be invalid. 
@@ -201,7 +206,7 @@ func (blockExec *BlockExecutor) Commit( state State, block *types.Block, deliverTxResponses []*abci.ResponseDeliverTx, -) ([]byte, error) { +) ([]byte, int64, error) { blockExec.mempool.Lock() defer blockExec.mempool.Unlock() @@ -210,7 +215,7 @@ func (blockExec *BlockExecutor) Commit( err := blockExec.mempool.FlushAppConn() if err != nil { blockExec.logger.Error("Client error during mempool.FlushAppConn", "err", err) - return nil, err + return nil, 0, err } // Commit block, get hash back @@ -220,7 +225,7 @@ func (blockExec *BlockExecutor) Commit( "Client error during proxyAppConn.CommitSync", "err", err, ) - return nil, err + return nil, 0, err } // ResponseCommit has no error code - just data @@ -240,7 +245,7 @@ func (blockExec *BlockExecutor) Commit( TxPostCheck(state), ) - return res.Data, err + return res.Data, res.RetainHeight, err } //--------------------------------------------------------- @@ -253,6 +258,7 @@ func execBlockOnProxyApp( proxyAppConn proxy.AppConnConsensus, block *types.Block, stateDB dbm.DB, + voterParams *types.VoterParams, ) (*ABCIResponses, error) { var validTxs, invalidTxs = 0, 0 @@ -278,7 +284,7 @@ func execBlockOnProxyApp( } proxyAppConn.SetResponseCallback(proxyCb) - commitInfo, byzVals := getBeginBlockValidatorInfo(block, stateDB) + commitInfo, byzVals := getBeginBlockValidatorInfo(block, stateDB, voterParams) // Begin block var err error @@ -313,13 +319,14 @@ func execBlockOnProxyApp( return abciResponses, nil } -func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCommitInfo, []abci.Evidence) { +func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB, voterParams *types.VoterParams) ( + abci.LastCommitInfo, []abci.Evidence) { voteInfos := make([]abci.VoteInfo, block.LastCommit.Size()) // block.Height=1 -> LastCommitInfo.Votes are empty. // Remember that the first LastCommit is intentionally empty, so it makes // sense for LastCommitInfo.Votes to also be empty. 
if block.Height > 1 { - lastValSet, err := LoadValidators(stateDB, block.Height-1) + lastVoterSet, err := LoadVoters(stateDB, block.Height-1, voterParams) if err != nil { panic(err) } @@ -327,15 +334,16 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo // Sanity check that commit size matches validator set size - only applies // after first block. var ( - commitSize = block.LastCommit.Size() - valSetLen = len(lastValSet.Validators) + commitSize = block.LastCommit.Size() + voterSetLen = lastVoterSet.Size() ) - if commitSize != valSetLen { - panic(fmt.Sprintf("commit size (%d) doesn't match valset length (%d) at height %d\n\n%v\n\n%v", - commitSize, valSetLen, block.Height, block.LastCommit.Signatures, lastValSet.Validators)) + + if commitSize != voterSetLen { + panic(fmt.Sprintf("commit size (%d) doesn't match voterset length (%d) at height %d\n\n%v\n\n%v", + commitSize, voterSetLen, block.Height, block.LastCommit.Signatures, lastVoterSet.Voters)) } - for i, val := range lastValSet.Validators { + for i, val := range lastVoterSet.Voters { commitSig := block.LastCommit.Signatures[i] voteInfos[i] = abci.VoteInfo{ Validator: types.TM2PB.Validator(val), @@ -349,11 +357,11 @@ func getBeginBlockValidatorInfo(block *types.Block, stateDB dbm.DB) (abci.LastCo // We need the validator set. We already did this in validateBlock. // TODO: Should we instead cache the valset in the evidence itself and add // `SetValidatorSet()` and `ToABCI` methods ? - valset, err := LoadValidators(stateDB, ev.Height()) + voterSet, err := LoadVoters(stateDB, ev.Height(), voterParams) if err != nil { panic(err) } - byzVals[i] = types.TM2PB.Evidence(ev, valset, block.Time) + byzVals[i] = types.TM2PB.Evidence(ev, voterSet, block.Time) } return abci.LastCommitInfo{ @@ -393,7 +401,7 @@ func updateState( ) (State, error) { // Copy the valset so we can apply changes from EndBlock - // and update s.LastValidators and s.Validators. 
+ // and update s.LastVoters and s.Validators. nValSet := state.NextValidators.Copy() // Update the validator set with the latest abciResponses. @@ -433,18 +441,23 @@ func updateState( return state, fmt.Errorf("error get proof of hash: %v", err) } + nextVoters := types.SelectVoter(nValSet, proofHash, state.VoterParams) + // NOTE: the AppHash has not been populated. // It will be filled on state.Save. return State{ Version: nextVersion, ChainID: state.ChainID, + VoterParams: state.VoterParams, LastBlockHeight: header.Height, LastBlockID: blockID, LastBlockTime: header.Time, LastProofHash: proofHash, NextValidators: nValSet, + NextVoters: nextVoters, Validators: state.NextValidators.Copy(), - LastValidators: state.Validators.Copy(), + Voters: state.NextVoters.Copy(), + LastVoters: state.Voters.Copy(), LastHeightValidatorsChanged: lastHeightValsChanged, ConsensusParams: nextParams, LastHeightConsensusParamsChanged: lastHeightParamsChanged, @@ -500,8 +513,9 @@ func ExecCommitBlock( block *types.Block, logger log.Logger, stateDB dbm.DB, + voterParams *types.VoterParams, ) ([]byte, error) { - _, err := execBlockOnProxyApp(logger, appConnConsensus, block, stateDB) + _, err := execBlockOnProxyApp(logger, appConnConsensus, block, stateDB, voterParams) if err != nil { logger.Error("Error executing block on proxy app", "height", block.Height, "err", err) return nil, err diff --git a/state/execution_test.go b/state/execution_test.go index b9d6ab1bb..8676f08ca 100644 --- a/state/execution_test.go +++ b/state/execution_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/abci/example/kvstore" abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto/ed25519" @@ -26,7 +27,9 @@ var ( ) func TestApplyBlock(t *testing.T) { - cc := proxy.NewLocalClientCreator(kvstore.NewApplication()) + app := kvstore.NewApplication() + app.RetainBlocks = 1 + cc 
:= proxy.NewLocalClientCreator(app) proxyApp := proxy.NewAppConns(cc) err := proxyApp.Start() require.Nil(t, err) @@ -40,9 +43,9 @@ func TestApplyBlock(t *testing.T) { block := makeBlockWithPrivVal(state, privVals[state.Validators.Validators[0].Address.String()], 1) blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} - //nolint:ineffassign - state, err = blockExec.ApplyBlock(state, blockID, block) + _, retainHeight, err := blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) + assert.EqualValues(t, retainHeight, 1) // TODO check state and mempool } @@ -88,14 +91,14 @@ func TestBeginBlockValidators(t *testing.T) { for _, tc := range testCases { lastCommit := types.NewCommit(1, 0, prevBlockID, tc.lastCommitSigs) - proposer := types.SelectProposer(state.Validators, state.LastProofHash, 1, 0) + proposer := state.Validators.SelectProposer(state.LastProofHash, 1, 0) message := state.MakeHashMessage(0) proof, _ := privVals[proposer.Address.String()].GenerateVRFProof(message) // block for height 2 block, _ := state.MakeBlock(2, makeTxs(2), lastCommit, nil, proposer.Address, 0, proof) - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB, state.VoterParams) require.Nil(t, err, tc.desc) // -> app receives a list of validators with a bool indicating if they signed @@ -134,7 +137,7 @@ func TestBeginBlockByzantineValidators(t *testing.T) { ev2 := types.NewMockEvidence(height2, time.Now(), idx2, val2) now := tmtime.Now() - valSet := state.Validators + valSet := state.Voters testCases := []struct { desc string evidence []types.Evidence @@ -161,12 +164,12 @@ func TestBeginBlockByzantineValidators(t *testing.T) { lastCommit := types.NewCommit(9, 0, prevBlockID, commitSigs) for _, tc := range testCases { message := state.MakeHashMessage(0) - proposer := types.SelectProposer(state.Validators, 
state.LastProofHash, 1, 0) + proposer := state.Validators.SelectProposer(state.LastProofHash, 1, 0) proof, _ := privVals[proposer.Address.String()].GenerateVRFProof(message) block, _ := state.MakeBlock(10, makeTxs(2), lastCommit, nil, proposer.Address, 0, proof) block.Time = now block.Evidence.Evidence = tc.evidence - _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB) + _, err = sm.ExecCommitBlock(proxyApp.Consensus(), block, log.TestingLogger(), stateDB, state.VoterParams) require.Nil(t, err, tc.desc) // -> app must receive an index of the byzantine validator @@ -310,7 +313,7 @@ func TestUpdateValidators(t *testing.T) { assert.NoError(t, err) require.Equal(t, tc.resultingSet.Size(), tc.currentSet.Size()) - assert.Equal(t, tc.resultingSet.TotalVotingPower(), tc.currentSet.TotalVotingPower()) + assert.Equal(t, tc.resultingSet.TotalStakingPower(), tc.currentSet.TotalStakingPower()) assert.Equal(t, tc.resultingSet.Validators[0].Address, tc.currentSet.Validators[0].Address) if tc.resultingSet.Size() > 1 { @@ -361,7 +364,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { {PubKey: types.TM2PB.PubKey(pubkey), Power: 10}, } - state, err = blockExec.ApplyBlock(state, blockID, block) + state, _, err = blockExec.ApplyBlock(state, blockID, block) require.Nil(t, err) // test new validator was added to NextValidators @@ -379,7 +382,7 @@ func TestEndBlockValidatorUpdates(t *testing.T) { require.True(t, ok, "Expected event of type EventDataValidatorSetUpdates, got %T", msg.Data()) if assert.NotEmpty(t, event.ValidatorUpdates) { assert.Equal(t, pubkey, event.ValidatorUpdates[0].PubKey) - assert.EqualValues(t, 10, event.ValidatorUpdates[0].VotingPower) + assert.EqualValues(t, 10, event.ValidatorUpdates[0].StakingPower) } case <-updatesSub.Cancelled(): t.Fatalf("updatesSub was cancelled (reason: %v)", updatesSub.Err()) @@ -415,7 +418,7 @@ func TestEndBlockValidatorUpdatesResultingInEmptySet(t *testing.T) { {PubKey: 
types.TM2PB.PubKey(state.Validators.Validators[0].PubKey), Power: 0}, } - assert.NotPanics(t, func() { state, err = blockExec.ApplyBlock(state, blockID, block) }) + assert.NotPanics(t, func() { state, _, err = blockExec.ApplyBlock(state, blockID, block) }) assert.NotNil(t, err) assert.NotEmpty(t, state.NextValidators.Validators) diff --git a/state/export_test.go b/state/export_test.go index 1f3990bbd..204dc163e 100644 --- a/state/export_test.go +++ b/state/export_test.go @@ -1,9 +1,10 @@ package state import ( + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) // @@ -45,6 +46,6 @@ func SaveConsensusParamsInfo(db dbm.DB, nextHeight, changeHeight int64, params t // SaveValidatorsInfo is an alias for the private saveValidatorsInfo method in // store.go, exported exclusively and explicitly for testing. -func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { - saveValidatorsInfo(db, height, lastHeightChanged, valSet) +func SaveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, proofHash []byte, valSet *types.ValidatorSet) { + saveValidatorsInfo(db, height, lastHeightChanged, proofHash, valSet) } diff --git a/state/helpers_test.go b/state/helpers_test.go index 8bfe803c5..28e96431d 100644 --- a/state/helpers_test.go +++ b/state/helpers_test.go @@ -46,7 +46,7 @@ func makeAndCommitGoodBlock( evidence []types.Evidence) (sm.State, types.BlockID, *types.Commit, error) { // A good block passes state, blockID, err := makeAndApplyGoodBlock(state, - privVals[types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address.String()], + privVals[state.Validators.SelectProposer(state.LastProofHash, height, 0).Address.String()], height, lastCommit, proposerAddr, blockExec, evidence) if err != nil { return state, types.BlockID{}, nil, err @@ -70,7 +70,7 @@ func makeAndApplyGoodBlock(state 
sm.State, privVal types.PrivValidator, height i } blockID := types.BlockID{Hash: block.Hash(), PartsHeader: types.PartSetHeader{Total: 3, Hash: tmrand.Bytes(32)}} - state, err := blockExec.ApplyBlock(state, blockID, block) + state, _, err := blockExec.ApplyBlock(state, blockID, block) if err != nil { return state, types.BlockID{}, err } @@ -134,7 +134,7 @@ func makeState(nVals, height int) (sm.State, dbm.DB, map[string]types.PrivValida for i := 1; i < height; i++ { s.LastBlockHeight++ - s.LastValidators = s.Validators.Copy() + s.LastVoters = s.Voters.Copy() sm.SaveState(stateDB, s) } return s, stateDB, privVals @@ -147,12 +147,13 @@ func makeBlock(state sm.State, height int64) *types.Block { func makeBlockWithPrivVal(state sm.State, privVal types.PrivValidator, height int64) *types.Block { message := state.MakeHashMessage(0) proof, _ := privVal.GenerateVRFProof(message) + pubKey, _ := privVal.GetPubKey() block, _ := state.MakeBlock( height, makeTxs(state.LastBlockHeight), new(types.Commit), nil, - privVal.GetPubKey().Address(), + pubKey.Address(), 0, proof, ) @@ -221,7 +222,7 @@ func makeHeaderPartsResponsesValPowerChange( // If the pubkey is new, remove the old and add the new. _, val := state.NextValidators.GetByIndex(0) - if val.VotingPower != power { + if val.StakingPower != power { abciResponses.EndBlock = &abci.ResponseEndBlock{ ValidatorUpdates: []abci.ValidatorUpdate{ types.TM2PB.NewValidatorUpdate(val.PubKey, power), diff --git a/state/services.go b/state/services.go index d83a410c9..a30956bdc 100644 --- a/state/services.go +++ b/state/services.go @@ -14,13 +14,17 @@ import ( // BlockStore defines the interface used by the ConsensusState. 
type BlockStore interface { + Base() int64 Height() int64 + Size() int64 LoadBlockMeta(height int64) *types.BlockMeta LoadBlock(height int64) *types.Block SaveBlock(block *types.Block, blockParts *types.PartSet, seenCommit *types.Commit) + PruneBlocks(height int64) (uint64, error) + LoadBlockByHash(hash []byte) *types.Block LoadBlockPart(height int64, index int) *types.Part diff --git a/state/state.go b/state/state.go index e1e748419..ebcebae86 100644 --- a/state/state.go +++ b/state/state.go @@ -12,8 +12,8 @@ import ( "github.com/tendermint/tendermint/version" ) -// database keys var ( + // database keys stateKey = []byte("stateKey") ) @@ -53,7 +53,8 @@ type State struct { Version Version // immutable - ChainID string + ChainID string + VoterParams *types.VoterParams // LastBlockHeight=0 at genesis (ie. block(H=0) does not exist) LastBlockHeight int64 @@ -63,7 +64,7 @@ type State struct { // vrf hash from proof LastProofHash []byte - // LastValidators is used to validate block.LastCommit. + // LastVoters is used to validate block.LastCommit. // Validators are persisted to the database separately every time they change, // so we can query for historical validator sets. // Note that if s.LastBlockHeight causes a valset change, @@ -71,7 +72,9 @@ type State struct { // Extra +1 due to nextValSet delay. NextValidators *types.ValidatorSet Validators *types.ValidatorSet - LastValidators *types.ValidatorSet + NextVoters *types.VoterSet + Voters *types.VoterSet + LastVoters *types.VoterSet LastHeightValidatorsChanged int64 // Consensus parameters used for validating blocks. @@ -93,8 +96,9 @@ func (state State) MakeHashMessage(round int) []byte { // Copy makes a copy of the State for mutating. 
func (state State) Copy() State { return State{ - Version: state.Version, - ChainID: state.ChainID, + Version: state.Version, + ChainID: state.ChainID, + VoterParams: state.VoterParams, LastBlockHeight: state.LastBlockHeight, LastBlockID: state.LastBlockID, @@ -103,8 +107,10 @@ func (state State) Copy() State { LastProofHash: state.LastProofHash, NextValidators: state.NextValidators.Copy(), + NextVoters: state.NextVoters.Copy(), Validators: state.Validators.Copy(), - LastValidators: state.LastValidators.Copy(), + Voters: state.Voters.Copy(), + LastVoters: state.LastVoters.Copy(), LastHeightValidatorsChanged: state.LastHeightValidatorsChanged, ConsensusParams: state.ConsensusParams, @@ -156,14 +162,14 @@ func (state State) MakeBlock( if height == 1 { timestamp = state.LastBlockTime // genesis time } else { - timestamp = MedianTime(commit, state.LastValidators) + timestamp = MedianTime(commit, state.LastVoters) } // Fill rest of header with state data. block.Header.Populate( state.Version.Consensus, state.ChainID, timestamp, state.LastBlockID, - state.Validators.Hash(), state.NextValidators.Hash(), + state.Voters.Hash(), state.NextVoters.Hash(), state.ConsensusParams.Hash(), state.AppHash, state.LastResultsHash, proposerAddress, round, @@ -177,7 +183,7 @@ func (state State) MakeBlock( // corresponding validator set. The computed time is always between timestamps of // the votes sent by honest processes, i.e., a faulty processes can not arbitrarily increase or decrease the // computed value. 
-func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time { +func MedianTime(commit *types.Commit, voters *types.VoterSet) time.Time { weightedTimes := make([]*tmtime.WeightedTime, len(commit.Signatures)) totalVotingPower := int64(0) @@ -185,11 +191,11 @@ func MedianTime(commit *types.Commit, validators *types.ValidatorSet) time.Time if commitSig.Absent() { continue } - _, validator := validators.GetByAddress(commitSig.ValidatorAddress) + _, validator := voters.GetByAddress(commitSig.ValidatorAddress) // If there's no condition, TestValidateBlockCommit panics; not needed normally. if validator != nil { - totalVotingPower += validator.VotingPower - weightedTimes[i] = tmtime.NewWeightedTime(commitSig.Timestamp, validator.VotingPower) + totalVotingPower += validator.StakingPower + weightedTimes[i] = tmtime.NewWeightedTime(commitSig.Timestamp, validator.StakingPower) } } @@ -245,8 +251,9 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { } return State{ - Version: initStateVersion, - ChainID: genDoc.ChainID, + Version: initStateVersion, + ChainID: genDoc.ChainID, + VoterParams: genDoc.VoterParams, LastBlockHeight: 0, LastBlockID: types.BlockID{}, @@ -256,8 +263,10 @@ func MakeGenesisState(genDoc *types.GenesisDoc) (State, error) { LastProofHash: genDoc.Hash(), NextValidators: nextValidatorSet, + NextVoters: types.SelectVoter(nextValidatorSet, genDoc.Hash(), genDoc.VoterParams), Validators: validatorSet, - LastValidators: types.NewValidatorSet(nil), + Voters: types.ToVoterAll(validatorSet.Validators), + LastVoters: &types.VoterSet{}, LastHeightValidatorsChanged: 1, ConsensusParams: *genDoc.ConsensusParams, diff --git a/state/state_test.go b/state/state_test.go index d9ce994b1..b985893d1 100644 --- a/state/state_test.go +++ b/state/state_test.go @@ -11,16 +11,17 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + dbm "github.com/tendermint/tm-db" + abci 
"github.com/tendermint/tendermint/abci/types" + cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/vrf" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/rand" tmrand "github.com/tendermint/tendermint/libs/rand" sm "github.com/tendermint/tendermint/state" - dbm "github.com/tendermint/tm-db" - - cfg "github.com/tendermint/tendermint/config" "github.com/tendermint/tendermint/types" ) @@ -184,26 +185,27 @@ func TestValidatorSimpleSaveLoad(t *testing.T) { assert := assert.New(t) // Can't load anything for height 0. - _, err := sm.LoadValidators(stateDB, 0) + _, err := sm.LoadVoters(stateDB, 0, state.VoterParams) assert.IsType(sm.ErrNoValSetForHeight{}, err, "expected err at height 0") // Should be able to load for height 1. - v, err := sm.LoadValidators(stateDB, 1) + v, err := sm.LoadVoters(stateDB, 1, state.VoterParams) assert.Nil(err, "expected no err at height 1") assert.Equal(v.Hash(), state.Validators.Hash(), "expected validator hashes to match") // Should be able to load for height 2. - v, err = sm.LoadValidators(stateDB, 2) + v, err = sm.LoadVoters(stateDB, 2, state.VoterParams) assert.Nil(err, "expected no err at height 2") assert.Equal(v.Hash(), state.NextValidators.Hash(), "expected validator hashes to match") // Increment height, save; should be able to load for next & next next height. 
state.LastBlockHeight++ nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) - vp0, err := sm.LoadValidators(stateDB, nextHeight+0) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) + vp0, err := sm.LoadVoters(stateDB, nextHeight+0, state.VoterParams) assert.Nil(err, "expected no err") - vp1, err := sm.LoadValidators(stateDB, nextHeight+1) + vp1, err := sm.LoadVoters(stateDB, nextHeight+1, state.VoterParams) assert.Nil(err, "expected no err") assert.Equal(vp0.Hash(), state.Validators.Hash(), "expected validator hashes to match") assert.Equal(vp1.Hash(), state.NextValidators.Hash(), "expected next validator hashes to match") @@ -223,7 +225,7 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { highestHeight := changeHeights[N-1] + 5 changeIndex := 0 _, val := state.Validators.GetByIndex(0) - power := val.VotingPower + power := val.StakingPower var err error var validatorUpdates []*types.Validator for i := int64(1); i < highestHeight; i++ { @@ -238,13 +240,14 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.NoError(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) } // On each height change, increment the power by one. testCases := make([]int64, highestHeight) changeIndex = 0 - power = val.VotingPower + power = val.StakingPower for i := int64(1); i < highestHeight+1; i++ { // We get to the height after a change height use the next pubkey (note // our counter starts at 0 this time). 
@@ -256,12 +259,12 @@ func TestOneValidatorChangesSaveLoad(t *testing.T) { } for i, power := range testCases { - v, err := sm.LoadValidators(stateDB, int64(i+1+1)) // +1 because vset changes delayed by 1 block. + v, err := sm.LoadVoters(stateDB, int64(i+1+1), state.VoterParams) // +1 because vset changes delayed by 1 block. assert.Nil(t, err, fmt.Sprintf("expected no err at height %d", i)) assert.Equal(t, v.Size(), 1, "validator set size is greater than 1: %d", v.Size()) _, val := v.GetByIndex(0) - assert.Equal(t, val.VotingPower, power, fmt.Sprintf(`unexpected powerat + assert.Equal(t, val.StakingPower, power, fmt.Sprintf(`unexpected powerat height %d`, i)) } } @@ -328,7 +331,8 @@ func TestProposerFrequency(t *testing.T) { votePower := int64(tmrand.Int()%maxPower) + 1 totalVotePower += votePower privVal := types.NewMockPV() - pubKey := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) val := types.NewValidator(pubKey, votePower) val.ProposerPriority = tmrand.Int64() vals[j] = val @@ -357,15 +361,16 @@ func genValSetWithPowers(powers []int64) *types.ValidatorSet { // test a proposer appears as frequently as expected func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { - N := valSet.Size() - totalPower := valSet.TotalVotingPower() + voterSet := types.ToVoterAll(valSet.Validators) + N := voterSet.Size() + totalPower := voterSet.TotalVotingPower() // run the proposer selection and track frequencies runMult := 1 runs := int(totalPower) * runMult freqs := make([]int, N) for i := 0; i < runs; i++ { - prop := types.SelectProposer(valSet, []byte{}, 1, i) + prop := valSet.SelectProposer([]byte{}, 1, i) idx, _ := valSet.GetByAddress(prop.Address) freqs[idx]++ valSet.IncrementProposerPriority(1) @@ -374,7 +379,7 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { // assert frequencies match expected (max off by 1) for i, freq := range freqs { _, val := valSet.GetByIndex(i) - expectFreq := 
int(val.VotingPower) * runMult + expectFreq := int(val.StakingPower) * runMult gotFreq := freq abs := int(math.Abs(float64(expectFreq - gotFreq))) @@ -396,9 +401,9 @@ func testProposerFreq(t *testing.T, caseNum int, valSet *types.ValidatorSet) { func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - val1VotingPower := int64(10) + val1StakingPower := int64(10) val1PubKey := ed25519.GenPrivKey().PubKey() - val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower} + val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, StakingPower: val1StakingPower} state.Validators = types.NewValidatorSet([]*types.Validator{val1}) state.NextValidators = state.Validators @@ -415,14 +420,14 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { require.NoError(t, err) updatedState, err := sm.UpdateState(state, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) - curTotal := val1VotingPower + curTotal := val1StakingPower // one increment step and one validator: 0 + power - total_power == 0 - assert.Equal(t, 0+val1VotingPower-curTotal, updatedState.NextValidators.Validators[0].ProposerPriority) + assert.Equal(t, 0+val1StakingPower-curTotal, updatedState.NextValidators.Validators[0].ProposerPriority) // add a validator val2PubKey := ed25519.GenPrivKey().PubKey() - val2VotingPower := int64(100) - updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2VotingPower} + val2StakingPower := int64(100) + updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val2StakingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) updatedState2, err := sm.UpdateState(updatedState, blockID, &block.Header, abciResponses, validatorUpdates) @@ -437,7 +442,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t 
*testing.T) { // Steps from adding new validator: // 0 - val1 prio is 0, TVP after add: wantVal1Prio := int64(0) - totalPowerAfter := val1VotingPower + val2VotingPower + totalPowerAfter := val1StakingPower + val2StakingPower // 1. Add - Val2 should be initially added with (-123) => wantVal2Prio := -(totalPowerAfter + (totalPowerAfter >> 3)) // 2. Scale - noop @@ -448,22 +453,22 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { wantVal1Prio -= avg.Int64() // 62 // 4. Steps from IncrementProposerPriority - wantVal1Prio += val1VotingPower // 72 - wantVal2Prio += val2VotingPower // 39 - wantVal1Prio -= totalPowerAfter // -38 as val1 is proposer + wantVal1Prio += val1StakingPower // 72 + wantVal2Prio += val2StakingPower // 39 + wantVal1Prio -= totalPowerAfter // -38 as val1 is proposer assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority) assert.Equal(t, wantVal2Prio, addedVal2.ProposerPriority) // Updating a validator does not reset the ProposerPriority to zero: - // 1. Add - Val2 VotingPower change to 1 => + // 1. 
Add - Val2 StakingPower change to 1 => updatedVotingPowVal2 := int64(1) updateVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: updatedVotingPowVal2} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateVal}) assert.NoError(t, err) // this will cause the diff of priorities (77) - // to be larger than threshold == 2*totalVotingPower (22): + // to be larger than threshold == 2*totalStakingPower (22): updatedState3, err := sm.UpdateState(updatedState2, blockID, &block.Header, abciResponses, validatorUpdates) assert.NoError(t, err) @@ -479,7 +484,7 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { wantVal2Prio = prevVal2.ProposerPriority // scale to diffMax = 22 = 2 * tvp, diff=39-(-38)=77 // new totalPower - totalPower := updatedVal1.VotingPower + updatedVal2.VotingPower + totalPower := updatedVal1.StakingPower + updatedVal2.StakingPower dist := wantVal2Prio - wantVal1Prio // ratio := (dist + 2*totalPower - 1) / 2*totalPower = 98/22 = 4 ratio := (dist + 2*totalPower - 1) / (2 * totalPower) @@ -491,9 +496,9 @@ func TestProposerPriorityDoesNotGetResetToZero(t *testing.T) { // 4. IncrementProposerPriority() -> // v1(10):-9+10, v2(1):9+1 -> v2 proposer so subsract tvp(11) // v1(10):1, v2(1):-1 - wantVal2Prio += updatedVal2.VotingPower // 10 -> prop - wantVal1Prio += updatedVal1.VotingPower // 1 - wantVal2Prio -= totalPower // -1 + wantVal2Prio += updatedVal2.StakingPower // 10 -> prop + wantVal1Prio += updatedVal1.StakingPower // 1 + wantVal2Prio -= totalPower // -1 assert.Equal(t, wantVal2Prio, updatedVal2.ProposerPriority) assert.Equal(t, wantVal1Prio, updatedVal1.ProposerPriority) @@ -507,16 +512,16 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // have the same voting power (and the 2nd was added later). 
tearDown, _, state := setupTestCase(t) defer tearDown(t) - val1VotingPower := int64(10) + val1StakingPower := int64(10) val1PubKey := ed25519.GenPrivKey().PubKey() - val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, VotingPower: val1VotingPower} + val1 := &types.Validator{Address: val1PubKey.Address(), PubKey: val1PubKey, StakingPower: val1StakingPower} // reset state validators to above validator state.Validators = types.NewValidatorSet([]*types.Validator{val1}) state.NextValidators = state.Validators // we only have one validator: assert.Equal(t, val1PubKey.Address(), - types.SelectProposer(state.Validators, []byte{}, state.LastBlockHeight+1, 0).Address) + state.Validators.SelectProposer([]byte{}, state.LastBlockHeight+1, 0).Address) block := makeBlock(state, state.LastBlockHeight+1) blockID := types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} @@ -531,14 +536,14 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { assert.NoError(t, err) // 0 + 10 (initial prio) - 10 (avg) - 10 (mostest - total) = -10 - totalPower := val1VotingPower - wantVal1Prio := 0 + val1VotingPower - totalPower + totalPower := val1StakingPower + wantVal1Prio := 0 + val1StakingPower - totalPower assert.Equal(t, wantVal1Prio, updatedState.NextValidators.Validators[0].ProposerPriority) assert.Equal(t, val1PubKey.Address(), updatedState.NextValidators.Validators[0].Address) // add a validator with the same voting power as the first val2PubKey := ed25519.GenPrivKey().PubKey() - updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val1VotingPower} + updateAddVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(val2PubKey), Power: val1StakingPower} validatorUpdates, err = types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{updateAddVal}) assert.NoError(t, err) @@ -559,8 +564,8 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { _, updatedVal2 := 
updatedState2.NextValidators.GetByAddress(val2PubKey.Address()) // 1. Add - val2VotingPower := val1VotingPower - totalPower = val1VotingPower + val2VotingPower // 20 + val2StakingPower := val1StakingPower + totalPower = val1StakingPower + val2StakingPower // 20 v2PrioWhenAddedVal2 := -(totalPower + (totalPower >> 3)) // -22 // 2. Scale - noop // 3. Center @@ -569,9 +574,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { expectedVal2Prio := v2PrioWhenAddedVal2 - avg.Int64() // -11 expectedVal1Prio := oldVal1.ProposerPriority - avg.Int64() // 11 // 4. Increment - expectedVal2Prio += val2VotingPower // -11 + 10 = -1 - expectedVal1Prio += val1VotingPower // 11 + 10 == 21 - expectedVal1Prio -= totalPower // 1, val1 proposer + expectedVal2Prio += val2StakingPower // -11 + 10 = -1 + expectedVal1Prio += val1StakingPower // 11 + 10 == 21 + expectedVal1Prio -= totalPower // 1, val1 proposer assert.EqualValues(t, expectedVal1Prio, updatedVal1.ProposerPriority) assert.EqualValues( @@ -599,9 +604,9 @@ func TestProposerPriorityProposerAlternates(t *testing.T) { // check if expected proposer prio is matched: // Increment - expectedVal2Prio2 := expectedVal2Prio + val2VotingPower // -1 + 10 = 9 - expectedVal1Prio2 := expectedVal1Prio + val1VotingPower // 1 + 10 == 11 - expectedVal1Prio2 -= totalPower // -9, val1 proposer + expectedVal2Prio2 := expectedVal2Prio + val2StakingPower // -1 + 10 = 9 + expectedVal1Prio2 := expectedVal1Prio + val1StakingPower // 1 + 10 == 11 + expectedVal1Prio2 -= totalPower // -9, val1 proposer assert.EqualValues( t, @@ -676,13 +681,13 @@ func TestLargeGenesisValidator(t *testing.T) { tearDown, _, state := setupTestCase(t) defer tearDown(t) - genesisVotingPower := types.MaxTotalVotingPower / 1000 + genesisStakingPower := types.MaxTotalStakingPower / 1000 genesisPubKey := ed25519.GenPrivKey().PubKey() // fmt.Println("genesis addr: ", genesisPubKey.Address()) genesisVal := &types.Validator{ - Address: genesisPubKey.Address(), - PubKey: 
genesisPubKey, - VotingPower: genesisVotingPower, + Address: genesisPubKey.Address(), + PubKey: genesisPubKey, + StakingPower: genesisStakingPower, } // reset state validators to above validator state.Validators = types.NewValidatorSet([]*types.Validator{genesisVal}) @@ -705,12 +710,12 @@ func TestLargeGenesisValidator(t *testing.T) { updatedState, err := sm.UpdateState(oldState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) - // no changes in voting power (ProposerPrio += VotingPower == Voting in 1st round; than shiftByAvg == 0, + // no changes in voting power (ProposerPrio += StakingPower == Voting in 1st round; than shiftByAvg == 0, // than -Total == -Voting) // -> no change in ProposerPrio (stays zero): assert.EqualValues(t, oldState.NextValidators, updatedState.NextValidators) assert.EqualValues(t, 0, - types.SelectProposer(updatedState.NextValidators, []byte{}, block.Height, 0).ProposerPriority) + updatedState.NextValidators.SelectProposer([]byte{}, block.Height, 0).ProposerPriority) oldState = updatedState } @@ -720,8 +725,10 @@ func TestLargeGenesisValidator(t *testing.T) { // see how long it takes until the effect wears off and both begin to alternate // see: https://github.com/tendermint/tendermint/issues/2960 firstAddedValPubKey := ed25519.GenPrivKey().PubKey() - firstAddedValVotingPower := int64(10) - firstAddedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(firstAddedValPubKey), Power: firstAddedValVotingPower} + firstAddedValStakingPower := int64(10) + firstAddedVal := abci.ValidatorUpdate{ + PubKey: types.TM2PB.PubKey(firstAddedValPubKey), + Power: firstAddedValStakingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{firstAddedVal}) assert.NoError(t, err) abciResponses := &sm.ABCIResponses{ @@ -765,7 +772,7 @@ func TestLargeGenesisValidator(t *testing.T) { for i := 0; i < 10; i++ { addedPubKey := ed25519.GenPrivKey().PubKey() - addedVal := abci.ValidatorUpdate{PubKey: 
types.TM2PB.PubKey(addedPubKey), Power: firstAddedValVotingPower} + addedVal := abci.ValidatorUpdate{PubKey: types.TM2PB.PubKey(addedPubKey), Power: firstAddedValStakingPower} validatorUpdates, err := types.PB2TM.ValidatorUpdates([]abci.ValidatorUpdate{addedVal}) assert.NoError(t, err) @@ -808,8 +815,8 @@ func TestLargeGenesisValidator(t *testing.T) { blockID = types.BlockID{Hash: block.Hash(), PartsHeader: block.MakePartSet(testPartSize).Header()} curState, err = sm.UpdateState(curState, blockID, &block.Header, abciResponses, validatorUpdates) require.NoError(t, err) - if !bytes.Equal(types.SelectProposer(curState.Validators, []byte{}, int64(count), 0).Address, - types.SelectProposer(curState.NextValidators, []byte{}, int64(count+1), 0).Address) { + if !bytes.Equal(curState.Validators.SelectProposer([]byte{}, int64(count), 0).Address, + curState.NextValidators.SelectProposer([]byte{}, int64(count+1), 0).Address) { isProposerUnchanged = false } count++ @@ -849,9 +856,9 @@ func TestStoreLoadValidatorsIncrementsProposerPriority(t *testing.T) { tearDown, stateDB, state := setupTestCase(t) defer tearDown(t) state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.NextValidators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) nextHeight := state.LastBlockHeight + 1 @@ -875,9 +882,9 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { defer tearDown(t) require.Equal(t, int64(0), state.LastBlockHeight) state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.NextValidators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) _, valOld := 
state.Validators.GetByIndex(0) @@ -895,7 +902,8 @@ func TestManyValidatorChangesSaveLoad(t *testing.T) { state, err = sm.UpdateState(state, blockID, &header, responses, validatorUpdates) require.Nil(t, err) nextHeight := state.LastBlockHeight + 1 - sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, nextHeight+1, state.LastHeightValidatorsChanged, + state.LastProofHash, state.NextValidators) // Load nextheight, it should be the oldpubkey. v0, err := sm.LoadValidators(stateDB, nextHeight) diff --git a/state/store.go b/state/store.go index e49e289f0..f9f75207a 100644 --- a/state/store.go +++ b/state/store.go @@ -3,11 +3,12 @@ package state import ( "fmt" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" tmmath "github.com/tendermint/tendermint/libs/math" tmos "github.com/tendermint/tendermint/libs/os" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) const ( @@ -104,10 +105,10 @@ func saveState(db dbm.DB, state State, key []byte) { // This extra logic due to Tendermint validator set changes being delayed 1 block. // It may get overwritten due to InitChain validator updates. lastHeightVoteChanged := int64(1) - saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, state.Validators) + saveValidatorsInfo(db, nextHeight, lastHeightVoteChanged, []byte{}, state.Validators) } // Save next validators. - saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.NextValidators) + saveValidatorsInfo(db, nextHeight+1, state.LastHeightValidatorsChanged, state.LastProofHash, state.NextValidators) // Save next consensus params. 
saveConsensusParamsInfo(db, nextHeight, state.LastHeightConsensusParamsChanged, state.ConsensusParams) db.SetSync(key, state.Bytes()) @@ -124,6 +125,102 @@ type ABCIResponses struct { BeginBlock *abci.ResponseBeginBlock `json:"begin_block"` } +// PruneStates deletes states between the given heights (including from, excluding to). It is not +// guaranteed to delete all states, since the last checkpointed state and states being pointed to by +// e.g. `LastHeightChanged` must remain. The state at to must also exist. +// +// The from parameter is necessary since we can't do a key scan in a performant way due to the key +// encoding not preserving ordering: https://github.com/tendermint/tendermint/issues/4567 +// This will cause some old states to be left behind when doing incremental partial prunes, +// specifically older checkpoints and LastHeightChanged targets. +func PruneStates(db dbm.DB, from int64, to int64) error { + if from <= 0 || to <= 0 { + return fmt.Errorf("from height %v and to height %v must be greater than 0", from, to) + } + if from >= to { + return fmt.Errorf("from height %v must be lower than to height %v", from, to) + } + valInfo := loadValidatorsInfo(db, calcValidatorsKey(to)) + if valInfo == nil { + return fmt.Errorf("validators at height %v not found", to) + } + paramsInfo := loadConsensusParamsInfo(db, to) + if paramsInfo == nil { + return fmt.Errorf("consensus params at height %v not found", to) + } + + keepVals := make(map[int64]bool) + if valInfo.ValidatorSet == nil { + keepVals[valInfo.LastHeightChanged] = true + keepVals[lastStoredHeightFor(to, valInfo.LastHeightChanged)] = true // keep last checkpoint too + } + keepParams := make(map[int64]bool) + if paramsInfo.ConsensusParams.Equals(&types.ConsensusParams{}) { + keepParams[paramsInfo.LastHeightChanged] = true + } + + batch := db.NewBatch() + defer batch.Close() + pruned := uint64(0) + var err error + + // We have to delete in reverse order, to avoid deleting previous heights that have 
validator + // sets and consensus params that we may need to retrieve. + for h := to - 1; h >= from; h-- { + // For heights we keep, we must make sure they have the full validator set or consensus + // params, otherwise they will panic if they're retrieved directly (instead of + // indirectly via a LastHeightChanged pointer). + if keepVals[h] { + v := loadValidatorsInfo(db, calcValidatorsKey(h)) + if v.ValidatorSet == nil { + v.ValidatorSet, err = LoadValidators(db, h) + if err != nil { + return err + } + v.LastHeightChanged = h + batch.Set(calcValidatorsKey(h), v.Bytes()) + } + } else { + batch.Delete(calcValidatorsKey(h)) + } + + if keepParams[h] { + p := loadConsensusParamsInfo(db, h) + if p.ConsensusParams.Equals(&types.ConsensusParams{}) { + p.ConsensusParams, err = LoadConsensusParams(db, h) + if err != nil { + return err + } + p.LastHeightChanged = h + batch.Set(calcConsensusParamsKey(h), p.Bytes()) + } + } else { + batch.Delete(calcConsensusParamsKey(h)) + } + + batch.Delete(calcABCIResponsesKey(h)) + pruned++ + + // avoid batches growing too large by flushing to database regularly + if pruned%1000 == 0 && pruned > 0 { + err := batch.Write() + if err != nil { + return err + } + batch.Close() + batch = db.NewBatch() + defer batch.Close() + } + } + + err = batch.WriteSync() + if err != nil { + return err + } + + return nil +} + // NewABCIResponses returns a new ABCIResponses func NewABCIResponses(block *types.Block) *ABCIResponses { resDeliverTxs := make([]*abci.ResponseDeliverTx, len(block.Data.Txs)) @@ -186,6 +283,7 @@ func SaveABCIResponses(db dbm.DB, height int64, abciResponses *ABCIResponses) { type ValidatorsInfo struct { ValidatorSet *types.ValidatorSet LastHeightChanged int64 + ProofHash []byte } // Bytes serializes the ValidatorsInfo using go-amino. @@ -196,13 +294,13 @@ func (valInfo *ValidatorsInfo) Bytes() []byte { // LoadValidators loads the ValidatorSet for a given height. 
// Returns ErrNoValSetForHeight if the validator set can't be found for this height. func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { - valInfo := loadValidatorsInfo(db, height) + valInfo := loadValidatorsInfo(db, calcValidatorsKey(height)) if valInfo == nil { return nil, ErrNoValSetForHeight{height} } if valInfo.ValidatorSet == nil { lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) - valInfo2 := loadValidatorsInfo(db, lastStoredHeight) + valInfo2 := loadValidatorsInfo(db, calcValidatorsKey(lastStoredHeight)) if valInfo2 == nil || valInfo2.ValidatorSet == nil { panic( fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", @@ -218,14 +316,41 @@ func LoadValidators(db dbm.DB, height int64) (*types.ValidatorSet, error) { return valInfo.ValidatorSet, nil } +// LoadVoters loads the VoterSet for a given height. +// Returns ErrNoValSetForHeight if the validator set can't be found for this height. +func LoadVoters(db dbm.DB, height int64, voterParams *types.VoterParams) (*types.VoterSet, error) { + valInfo := loadValidatorsInfo(db, calcValidatorsKey(height)) + if valInfo == nil { + return nil, ErrNoValSetForHeight{height} + } + if valInfo.ValidatorSet == nil { + proofHash := valInfo.ProofHash // store proof hash of the height + lastStoredHeight := lastStoredHeightFor(height, valInfo.LastHeightChanged) + valInfo2 := loadValidatorsInfo(db, calcValidatorsKey(lastStoredHeight)) + if valInfo2 == nil || valInfo2.ValidatorSet == nil { + panic( + fmt.Sprintf("Couldn't find validators at height %d (height %d was originally requested)", + lastStoredHeight, + height, + ), + ) + } + valInfo2.ValidatorSet.IncrementProposerPriority(int(height - lastStoredHeight)) // mutate + valInfo = valInfo2 + valInfo.ProofHash = proofHash // reload proof again + } + + return types.SelectVoter(valInfo.ValidatorSet, valInfo.ProofHash, voterParams), nil +} + func lastStoredHeightFor(height, lastHeightChanged int64) 
int64 { checkpointHeight := height - height%valSetCheckpointInterval return tmmath.MaxInt64(checkpointHeight, lastHeightChanged) } // CONTRACT: Returned ValidatorsInfo can be mutated. -func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { - buf, err := db.Get(calcValidatorsKey(height)) +func loadValidatorsInfo(db dbm.DB, valKey []byte) *ValidatorsInfo { + buf, err := db.Get(valKey) if err != nil { panic(err) } @@ -250,12 +375,13 @@ func loadValidatorsInfo(db dbm.DB, height int64) *ValidatorsInfo { // `height` is the effective height for which the validator is responsible for // signing. It should be called from s.Save(), right before the state itself is // persisted. -func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, valSet *types.ValidatorSet) { +func saveValidatorsInfo(db dbm.DB, height, lastHeightChanged int64, proofHash []byte, valSet *types.ValidatorSet) { if lastHeightChanged > height { panic("LastHeightChanged cannot be greater than ValidatorsInfo height") } valInfo := &ValidatorsInfo{ LastHeightChanged: lastHeightChanged, + ProofHash: proofHash, } // Only persist validator set if it was updated or checkpoint height (see // valSetCheckpointInterval) is reached. 
diff --git a/state/store_test.go b/state/store_test.go index 596f479ed..eef21c068 100644 --- a/state/store_test.go +++ b/state/store_test.go @@ -8,10 +8,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + cfg "github.com/tendermint/tendermint/config" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestStoreLoadValidators(t *testing.T) { @@ -20,15 +21,15 @@ func TestStoreLoadValidators(t *testing.T) { vals := types.NewValidatorSet([]*types.Validator{val}) // 1) LoadValidators loads validators using a height where they were last changed - sm.SaveValidatorsInfo(stateDB, 1, 1, vals) - sm.SaveValidatorsInfo(stateDB, 2, 1, vals) + sm.SaveValidatorsInfo(stateDB, 1, 1, []byte{}, vals) + sm.SaveValidatorsInfo(stateDB, 2, 1, []byte{}, vals) loadedVals, err := sm.LoadValidators(stateDB, 2) require.NoError(t, err) assert.NotZero(t, loadedVals.Size()) // 2) LoadValidators loads validators using a checkpoint height - sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, vals) + sm.SaveValidatorsInfo(stateDB, sm.ValSetCheckpointInterval, 1, []byte{}, vals) loadedVals, err = sm.LoadValidators(stateDB, sm.ValSetCheckpointInterval) require.NoError(t, err) @@ -47,14 +48,14 @@ func BenchmarkLoadValidators(b *testing.B) { b.Fatal(err) } state.Validators = genValSet(valSetSize) - types.SelectProposer(state.Validators, []byte{}, 1, 0) + state.Validators.SelectProposer([]byte{}, 1, 0) state.NextValidators = state.Validators.Copy() - types.SelectProposer(state.NextValidators, []byte{}, 2, 0) + state.Validators.SelectProposer([]byte{}, 2, 0) sm.SaveState(stateDB, state) for i := 10; i < 10000000000; i *= 10 { // 10, 100, 1000, ... 
i := i - sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, state.NextValidators) + sm.SaveValidatorsInfo(stateDB, int64(i), state.LastHeightValidatorsChanged, []byte{}, state.NextValidators) b.Run(fmt.Sprintf("height=%d", i), func(b *testing.B) { for n := 0; n < b.N; n++ { @@ -66,3 +67,119 @@ func BenchmarkLoadValidators(b *testing.B) { }) } } + +func TestPruneStates(t *testing.T) { + testcases := map[string]struct { + makeHeights int64 + pruneFrom int64 + pruneTo int64 + expectErr bool + expectVals []int64 + expectParams []int64 + expectABCI []int64 + }{ + "error on pruning from 0": {100, 0, 5, true, nil, nil, nil}, + "error when from > to": {100, 3, 2, true, nil, nil, nil}, + "error when from == to": {100, 3, 3, true, nil, nil, nil}, + "error when to does not exist": {100, 1, 101, true, nil, nil, nil}, + "prune all": {100, 1, 100, false, []int64{93, 100}, []int64{95, 100}, []int64{100}}, + "prune some": {10, 2, 8, false, []int64{1, 3, 8, 9, 10}, + []int64{1, 5, 8, 9, 10}, []int64{1, 8, 9, 10}}, + "prune across checkpoint": {100001, 1, 100001, false, []int64{99993, 100000, 100001}, + []int64{99995, 100001}, []int64{100001}}, + } + for name, tc := range testcases { + tc := tc + t.Run(name, func(t *testing.T) { + db := dbm.NewMemDB() + + // Generate a bunch of state data. Validators change for heights ending with 3, and + // parameters when ending with 5. 
+ validator := &types.Validator{Address: []byte{1, 2, 3}, VotingPower: 100} + validatorSet := &types.ValidatorSet{ + Validators: []*types.Validator{validator}, + } + valsChanged := int64(0) + paramsChanged := int64(0) + + for h := int64(1); h <= tc.makeHeights; h++ { + if valsChanged == 0 || h%10 == 2 { + valsChanged = h + 1 // Have to add 1, since NextValidators is what's stored + } + if paramsChanged == 0 || h%10 == 5 { + paramsChanged = h + } + + sm.SaveState(db, sm.State{ + LastBlockHeight: h - 1, + Validators: validatorSet, + NextValidators: validatorSet, + ConsensusParams: types.ConsensusParams{ + Block: types.BlockParams{MaxBytes: 10e6}, + }, + LastHeightValidatorsChanged: valsChanged, + LastHeightConsensusParamsChanged: paramsChanged, + }) + sm.SaveABCIResponses(db, h, sm.NewABCIResponses(&types.Block{ + Header: types.Header{Height: h}, + Data: types.Data{ + Txs: types.Txs{ + []byte{1}, + []byte{2}, + []byte{3}, + }, + }, + })) + } + + // Test assertions + err := sm.PruneStates(db, tc.pruneFrom, tc.pruneTo) + if tc.expectErr { + require.Error(t, err) + return + } + require.NoError(t, err) + + expectVals := sliceToMap(tc.expectVals) + expectParams := sliceToMap(tc.expectParams) + expectABCI := sliceToMap(tc.expectABCI) + + for h := int64(1); h <= tc.makeHeights; h++ { + vals, err := sm.LoadValidators(db, h) + if expectVals[h] { + require.NoError(t, err, "validators height %v", h) + require.NotNil(t, vals) + } else { + require.Error(t, err, "validators height %v", h) + require.Equal(t, sm.ErrNoValSetForHeight{Height: h}, err) + } + + params, err := sm.LoadConsensusParams(db, h) + if expectParams[h] { + require.NoError(t, err, "params height %v", h) + require.False(t, params.Equals(&types.ConsensusParams{})) + } else { + require.Error(t, err, "params height %v", h) + require.Equal(t, sm.ErrNoConsensusParamsForHeight{Height: h}, err) + } + + abci, err := sm.LoadABCIResponses(db, h) + if expectABCI[h] { + require.NoError(t, err, "abci height %v", h) + 
require.NotNil(t, abci) + } else { + require.Error(t, err, "abci height %v", h) + require.Equal(t, sm.ErrNoABCIResponsesForHeight{Height: h}, err) + } + } + }) + } +} + +func sliceToMap(s []int64) map[int64]bool { + m := make(map[int64]bool, len(s)) + for _, i := range s { + m[i] = true + } + return m +} diff --git a/state/tx_filter_test.go b/state/tx_filter_test.go index 9e666265c..2dac856bd 100644 --- a/state/tx_filter_test.go +++ b/state/tx_filter_test.go @@ -7,10 +7,11 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + dbm "github.com/tendermint/tm-db" + tmrand "github.com/tendermint/tendermint/libs/rand" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func TestTxFilter(t *testing.T) { diff --git a/state/txindex/indexer_service_test.go b/state/txindex/indexer_service_test.go index 6df7c984a..23968dbca 100644 --- a/state/txindex/indexer_service_test.go +++ b/state/txindex/indexer_service_test.go @@ -7,12 +7,13 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + db "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/log" "github.com/tendermint/tendermint/state/txindex" "github.com/tendermint/tendermint/state/txindex/kv" "github.com/tendermint/tendermint/types" - db "github.com/tendermint/tm-db" ) func TestIndexerServiceIndexesBlocks(t *testing.T) { diff --git a/state/txindex/kv/kv_bench_test.go b/state/txindex/kv/kv_bench_test.go index 34d770040..31267f54c 100644 --- a/state/txindex/kv/kv_bench_test.go +++ b/state/txindex/kv/kv_bench_test.go @@ -7,11 +7,12 @@ import ( "io/ioutil" "testing" + dbm "github.com/tendermint/tm-db" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/libs/kv" "github.com/tendermint/tendermint/libs/pubsub/query" 
"github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) func BenchmarkTxSearch(b *testing.B) { diff --git a/state/validation.go b/state/validation.go index a2760d532..5b91f8885 100644 --- a/state/validation.go +++ b/state/validation.go @@ -5,11 +5,12 @@ import ( "errors" "fmt" + dbm "github.com/tendermint/tm-db" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/vrf" "github.com/tendermint/tendermint/types" - dbm "github.com/tendermint/tm-db" ) //----------------------------------------------------- @@ -68,16 +69,16 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round block.LastResultsHash, ) } - if !bytes.Equal(block.ValidatorsHash, state.Validators.Hash()) { - return fmt.Errorf("wrong Block.Header.ValidatorsHash. Expected %X, got %v", - state.Validators.Hash(), - block.ValidatorsHash, + if !bytes.Equal(block.VotersHash, state.Voters.Hash()) { + return fmt.Errorf("wrong Block.Header.VotersHash. Expected %X, got %v", + state.Voters.Hash(), + block.VotersHash, ) } - if !bytes.Equal(block.NextValidatorsHash, state.NextValidators.Hash()) { - return fmt.Errorf("wrong Block.Header.NextValidatorsHash. Expected %X, got %v", - state.NextValidators.Hash(), - block.NextValidatorsHash, + if !bytes.Equal(block.NextVotersHash, state.NextVoters.Hash()) { + return fmt.Errorf("wrong Block.Header.NextVotersHash. 
Expected %X, got %v", + state.NextVoters.Hash(), + block.NextVotersHash, ) } @@ -87,10 +88,10 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round return errors.New("block at height 1 can't have LastCommit signatures") } } else { - if len(block.LastCommit.Signatures) != state.LastValidators.Size() { - return types.NewErrInvalidCommitSignatures(state.LastValidators.Size(), len(block.LastCommit.Signatures)) + if len(block.LastCommit.Signatures) != state.LastVoters.Size() { + return types.NewErrInvalidCommitSignatures(state.LastVoters.Size(), len(block.LastCommit.Signatures)) } - err := state.LastValidators.VerifyCommit( + err := state.LastVoters.VerifyCommit( state.ChainID, state.LastBlockID, block.Height-1, block.LastCommit) if err != nil { return err @@ -106,7 +107,7 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round ) } - medianTime := MedianTime(block.LastCommit, state.LastValidators) + medianTime := MedianTime(block.LastCommit, state.LastVoters) if !block.Time.Equal(medianTime) { return fmt.Errorf("invalid block time. 
Expected %v, got %v", medianTime, @@ -153,10 +154,10 @@ func validateBlock(evidencePool EvidencePool, stateDB dbm.DB, state State, round // validate proposer if !bytes.Equal(block.ProposerAddress.Bytes(), - types.SelectProposer(state.Validators, state.LastProofHash, block.Height, block.Round).Address.Bytes()) { + state.Validators.SelectProposer(state.LastProofHash, block.Height, block.Round).Address.Bytes()) { return fmt.Errorf("block.ProposerAddress, %X, is not the proposer %X", block.ProposerAddress, - types.SelectProposer(state.Validators, state.LastProofHash, block.Height, block.Round).Address, + state.Validators.SelectProposer(state.LastProofHash, block.Height, block.Round).Address, ) } @@ -192,37 +193,38 @@ func VerifyEvidence(stateDB dbm.DB, state State, evidence types.Evidence) error var ( height = state.LastBlockHeight evidenceParams = state.ConsensusParams.Evidence - ) - ageNumBlocks := height - evidence.Height() - if ageNumBlocks > evidenceParams.MaxAgeNumBlocks { - return fmt.Errorf("evidence from height %d is too old. Min height is %d", - evidence.Height(), height-evidenceParams.MaxAgeNumBlocks) - } + ageDuration = state.LastBlockTime.Sub(evidence.Time()) + ageNumBlocks = height - evidence.Height() + ) - ageDuration := state.LastBlockTime.Sub(evidence.Time()) - if ageDuration > evidenceParams.MaxAgeDuration { - return fmt.Errorf("evidence created at %v has expired. 
Evidence can not be older than: %v", - evidence.Time(), state.LastBlockTime.Add(evidenceParams.MaxAgeDuration)) + if ageDuration > evidenceParams.MaxAgeDuration && ageNumBlocks > evidenceParams.MaxAgeNumBlocks { + return fmt.Errorf( + "evidence from height %d (created at: %v) is too old; min height is %d and evidence can not be older than %v", + evidence.Height(), + evidence.Time(), + height-evidenceParams.MaxAgeNumBlocks, + state.LastBlockTime.Add(evidenceParams.MaxAgeDuration), + ) } - valset, err := LoadValidators(stateDB, evidence.Height()) + voterSet, err := LoadVoters(stateDB, evidence.Height(), state.VoterParams) if err != nil { // TODO: if err is just that we cant find it cuz we pruned, ignore. // TODO: if its actually bad evidence, punish peer return err } - // The address must have been an active validator at the height. - // NOTE: we will ignore evidence from H if the key was not a validator + // The address must have been an active voter at the height. + // NOTE: we will ignore evidence from H if the key was not a voter // at H, even if it is a validator at some nearby H' // XXX: this makes lite-client bisection as is unsafe // See https://github.com/tendermint/tendermint/issues/3244 ev := evidence height, addr := ev.Height(), ev.Address() - _, val := valset.GetByAddress(addr) + _, val := voterSet.GetByAddress(addr) if val == nil { - return fmt.Errorf("address %X was not a validator at height %d", addr, height) + return fmt.Errorf("address %X was not a voter at height %d", addr, height) } if err := evidence.Verify(state.ChainID, val.PubKey); err != nil { diff --git a/state/validation_test.go b/state/validation_test.go index eae63943d..31c75682e 100644 --- a/state/validation_test.go +++ b/state/validation_test.go @@ -5,11 +5,11 @@ import ( "time" "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/mock" "github.com/tendermint/tendermint/crypto/ed25519" "github.com/tendermint/tendermint/crypto/tmhash" 
"github.com/tendermint/tendermint/libs/log" + "github.com/tendermint/tendermint/mock" sm "github.com/tendermint/tendermint/state" "github.com/tendermint/tendermint/types" tmtime "github.com/tendermint/tendermint/types/time" @@ -54,8 +54,8 @@ func TestValidateBlockHeader(t *testing.T) { {"LastCommitHash wrong", func(block *types.Block) { block.LastCommitHash = wrongHash }}, {"DataHash wrong", func(block *types.Block) { block.DataHash = wrongHash }}, - {"ValidatorsHash wrong", func(block *types.Block) { block.ValidatorsHash = wrongHash }}, - {"NextValidatorsHash wrong", func(block *types.Block) { block.NextValidatorsHash = wrongHash }}, + {"VotersHash wrong", func(block *types.Block) { block.VotersHash = wrongHash }}, + {"NextVotersHash wrong", func(block *types.Block) { block.NextVotersHash = wrongHash }}, {"ConsensusHash wrong", func(block *types.Block) { block.ConsensusHash = wrongHash }}, {"AppHash wrong", func(block *types.Block) { block.AppHash = wrongHash }}, {"LastResultsHash wrong", func(block *types.Block) { block.LastResultsHash = wrongHash }}, @@ -67,7 +67,7 @@ func TestValidateBlockHeader(t *testing.T) { // Build up state for multiple heights for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address /* Invalid blocks don't pass */ @@ -107,10 +107,10 @@ func TestValidateBlockCommit(t *testing.T) { badPrivVal := types.NewMockPV() for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, []byte{}, height, 0).Address + proposerAddr := state.Validators.SelectProposer([]byte{}, height, 0).Address if height > 1 { /* - #2589: ensure state.LastValidators.VerifyCommit fails here + #2589: ensure state.LastVoters.VerifyCommit fails here */ // should be height-1 instead of height 
wrongHeightVote, err := types.MakeVote( @@ -136,7 +136,7 @@ func TestValidateBlockCommit(t *testing.T) { require.True(t, isErrInvalidCommitHeight, "expected ErrInvalidCommitHeight at height %d but got: %v", height, err) /* - #2589: test len(block.LastCommit.Signatures) == state.LastValidators.Size() + #2589: test len(block.LastCommit.Signatures) == state.LastVoters.Size() */ block, _ = state.MakeBlock(height, makeTxs(height), wrongSigsCommit, nil, proposerAddr, 0, proof) err = blockExec.ValidateBlock(state, 0, block) @@ -175,8 +175,12 @@ func TestValidateBlockCommit(t *testing.T) { time.Now(), ) require.NoError(t, err, "height %d", height) + + bpvPubKey, err := badPrivVal.GetPubKey() + require.NoError(t, err) + badVote := &types.Vote{ - ValidatorAddress: badPrivVal.GetPubKey().Address(), + ValidatorAddress: bpvPubKey.Address(), ValidatorIndex: 0, Height: height, Round: 0, @@ -210,7 +214,7 @@ func TestValidateBlockEvidence(t *testing.T) { lastCommit := types.NewCommit(0, 0, types.BlockID{}, nil) for height := int64(1); height < validationTestsStopHeight; height++ { - proposerAddr := types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address + proposerAddr := state.Validators.SelectProposer(state.LastProofHash, height, 0).Address proposerIdx, _ := state.Validators.GetByAddress(proposerAddr) goodEvidence := types.NewMockEvidence(height, time.Now(), proposerIdx, proposerAddr) if height > 1 { diff --git a/store/codec.go b/store/codec.go index 4895e8994..29a59948d 100644 --- a/store/codec.go +++ b/store/codec.go @@ -2,6 +2,7 @@ package store import ( amino "github.com/tendermint/go-amino" + "github.com/tendermint/tendermint/types" ) diff --git a/store/store.go b/store/store.go index 2f9ba93fd..cf66daa3b 100644 --- a/store/store.go +++ b/store/store.go @@ -24,6 +24,8 @@ Currently the precommit signatures are duplicated in the Block parts as well as the Commit. In the future this may change, perhaps by moving the Commit data outside the Block. 
(TODO) +The store can be assumed to contain all contiguous blocks between base and height (inclusive). + // NOTE: BlockStore methods will panic if they encounter errors // deserializing loaded data, indicating probable corruption on disk. */ @@ -31,6 +33,7 @@ type BlockStore struct { db dbm.DB mtx sync.RWMutex + base int64 height int64 } @@ -39,18 +42,36 @@ type BlockStore struct { func NewBlockStore(db dbm.DB) *BlockStore { bsjson := LoadBlockStoreStateJSON(db) return &BlockStore{ + base: bsjson.Base, height: bsjson.Height, db: db, } } -// Height returns the last known contiguous block height. +// Base returns the first known contiguous block height, or 0 for empty block stores. +func (bs *BlockStore) Base() int64 { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + return bs.base +} + +// Height returns the last known contiguous block height, or 0 for empty block stores. func (bs *BlockStore) Height() int64 { bs.mtx.RLock() defer bs.mtx.RUnlock() return bs.height } +// Size returns the number of blocks in the block store. +func (bs *BlockStore) Size() int64 { + bs.mtx.RLock() + defer bs.mtx.RUnlock() + if bs.height == 0 { + return 0 + } + return bs.height - bs.base + 1 +} + // LoadBlock returns the block with the given height. // If no block is found for that height, it returns nil. func (bs *BlockStore) LoadBlock(height int64) *types.Block { @@ -171,6 +192,74 @@ func (bs *BlockStore) LoadSeenCommit(height int64) *types.Commit { return commit } +// PruneBlocks removes block up to (but not including) a height. It returns number of blocks pruned. 
+func (bs *BlockStore) PruneBlocks(height int64) (uint64, error) { + if height <= 0 { + return 0, fmt.Errorf("height must be greater than 0") + } + bs.mtx.RLock() + if height > bs.height { + bs.mtx.RUnlock() + return 0, fmt.Errorf("cannot prune beyond the latest height %v", bs.height) + } + base := bs.base + bs.mtx.RUnlock() + if height < base { + return 0, fmt.Errorf("cannot prune to height %v, it is lower than base height %v", + height, base) + } + + pruned := uint64(0) + batch := bs.db.NewBatch() + defer batch.Close() + flush := func(batch dbm.Batch, base int64) error { + // We can't trust batches to be atomic, so update base first to make sure noone + // tries to access missing blocks. + bs.mtx.Lock() + bs.base = base + bs.mtx.Unlock() + bs.saveState() + + err := batch.WriteSync() + if err != nil { + return fmt.Errorf("failed to prune up to height %v: %w", base, err) + } + batch.Close() + return nil + } + + for h := base; h < height; h++ { + meta := bs.LoadBlockMeta(h) + if meta == nil { // assume already deleted + continue + } + batch.Delete(calcBlockMetaKey(h)) + batch.Delete(calcBlockHashKey(meta.BlockID.Hash)) + batch.Delete(calcBlockCommitKey(h)) + batch.Delete(calcSeenCommitKey(h)) + for p := 0; p < meta.BlockID.PartsHeader.Total; p++ { + batch.Delete(calcBlockPartKey(h, p)) + } + pruned++ + + // flush every 1000 blocks to avoid batches becoming too large + if pruned%1000 == 0 && pruned > 0 { + err := flush(batch, h) + if err != nil { + return 0, err + } + batch = bs.db.NewBatch() + defer batch.Close() + } + } + + err := flush(batch, height) + if err != nil { + return 0, err + } + return pruned, nil +} + // SaveBlock persists the given block, blockParts, and seenCommit to the underlying db. // blockParts: Must be parts of the block // seenCommit: The +2/3 precommits that were seen which committed at height. 
@@ -185,7 +274,7 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s height := block.Height hash := block.Hash() - if g, w := height, bs.Height()+1; g != w { + if g, w := height, bs.Height()+1; bs.Base() > 0 && g != w { panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", w, g)) } if !blockParts.IsComplete() { @@ -213,26 +302,36 @@ func (bs *BlockStore) SaveBlock(block *types.Block, blockParts *types.PartSet, s seenCommitBytes := cdc.MustMarshalBinaryBare(seenCommit) bs.db.Set(calcSeenCommitKey(height), seenCommitBytes) - // Save new BlockStoreStateJSON descriptor - BlockStoreStateJSON{Height: height}.Save(bs.db) - // Done! bs.mtx.Lock() bs.height = height + if bs.base == 0 { + bs.base = height + } bs.mtx.Unlock() + // Save new BlockStoreStateJSON descriptor + bs.saveState() + // Flush bs.db.SetSync(nil, nil) } func (bs *BlockStore) saveBlockPart(height int64, index int, part *types.Part) { - if height != bs.Height()+1 { - panic(fmt.Sprintf("BlockStore can only save contiguous blocks. Wanted %v, got %v", bs.Height()+1, height)) - } partBytes := cdc.MustMarshalBinaryBare(part) bs.db.Set(calcBlockPartKey(height, index), partBytes) } +func (bs *BlockStore) saveState() { + bs.mtx.RLock() + bsJSON := BlockStoreStateJSON{ + Base: bs.base, + Height: bs.height, + } + bs.mtx.RUnlock() + bsJSON.Save(bs.db) +} + //----------------------------------------------------------------------------- func calcBlockMetaKey(height int64) []byte { @@ -261,6 +360,7 @@ var blockStoreKey = []byte("blockStore") // BlockStoreStateJSON is the block store state JSON structure. 
type BlockStoreStateJSON struct { + Base int64 `json:"base"` Height int64 `json:"height"` } @@ -282,6 +382,7 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { } if len(bytes) == 0 { return BlockStoreStateJSON{ + Base: 0, Height: 0, } } @@ -290,5 +391,9 @@ func LoadBlockStoreStateJSON(db dbm.DB) BlockStoreStateJSON { if err != nil { panic(fmt.Sprintf("Could not unmarshal bytes: %X", bytes)) } + // Backwards compatibility with persisted data from before Base existed. + if bsj.Height > 0 && bsj.Base == 0 { + bsj.Base = 1 + } return bsj } diff --git a/store/store_test.go b/store/store_test.go index 970628e58..580600663 100644 --- a/store/store_test.go +++ b/store/store_test.go @@ -47,7 +47,7 @@ func makeTxs(height int64) (txs []types.Tx) { func makeBlock(height int64, state sm.State, lastCommit *types.Commit) *types.Block { block, _ := state.MakeBlock(height, makeTxs(height), lastCommit, nil, - types.SelectProposer(state.Validators, state.LastProofHash, height, 0).Address, 0, nil) + state.Validators.SelectProposer(state.LastProofHash, height, 0).Address, 0, nil) return block } @@ -66,20 +66,39 @@ func makeStateAndBlockStore(logger log.Logger) (sm.State, *BlockStore, cleanupFu func TestLoadBlockStoreStateJSON(t *testing.T) { db := db.NewMemDB() + bsj := &BlockStoreStateJSON{Base: 100, Height: 1000} + bsj.Save(db) - bsj := &BlockStoreStateJSON{Height: 1000} + retrBSJ := LoadBlockStoreStateJSON(db) + assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") +} + +func TestLoadBlockStoreStateJSON_Empty(t *testing.T) { + db := db.NewMemDB() + + bsj := &BlockStoreStateJSON{} bsj.Save(db) retrBSJ := LoadBlockStoreStateJSON(db) + assert.Equal(t, BlockStoreStateJSON{}, retrBSJ, "expected the retrieved DBs to match") +} - assert.Equal(t, *bsj, retrBSJ, "expected the retrieved DBs to match") +func TestLoadBlockStoreStateJSON_NoBase(t *testing.T) { + db := db.NewMemDB() + + bsj := &BlockStoreStateJSON{Height: 1000} + bsj.Save(db) + + retrBSJ := 
LoadBlockStoreStateJSON(db) + assert.Equal(t, BlockStoreStateJSON{Base: 1, Height: 1000}, retrBSJ, "expected the retrieved DBs to match") } func TestNewBlockStore(t *testing.T) { db := db.NewMemDB() - err := db.Set(blockStoreKey, []byte(`{"height": "10000"}`)) + err := db.Set(blockStoreKey, []byte(`{"base": "100", "height": "10000"}`)) require.NoError(t, err) bs := NewBlockStore(db) + require.Equal(t, int64(100), bs.Base(), "failed to properly parse blockstore") require.Equal(t, int64(10000), bs.Height(), "failed to properly parse blockstore") panicCausers := []struct { @@ -141,6 +160,7 @@ func TestMain(m *testing.M) { func TestBlockStoreSaveLoadBlock(t *testing.T) { state, bs, cleanup := makeStateAndBlockStore(log.NewTMLogger(new(bytes.Buffer))) defer cleanup() + require.Equal(t, bs.Base(), int64(0), "initially the base should be zero") require.Equal(t, bs.Height(), int64(0), "initially the height should be zero") // check there are no blocks at various heights @@ -156,7 +176,8 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { validPartSet := block.MakePartSet(2) seenCommit := makeTestCommit(10, tmtime.Now()) bs.SaveBlock(block, partSet, seenCommit) - require.Equal(t, bs.Height(), block.Header.Height, "expecting the new height to be changed") + require.EqualValues(t, 1, bs.Base(), "expecting the new height to be changed") + require.EqualValues(t, block.Header.Height, bs.Height(), "expecting the new height to be changed") incompletePartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 2}) uncontiguousPartSet := types.NewPartSetFromHeader(types.PartSetHeader{Total: 0}) @@ -167,8 +188,6 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { ChainID: "block_test", Time: tmtime.Now(), } - header2 := header1 - header2.Height = 4 // End of setup, test data @@ -198,9 +217,12 @@ func TestBlockStoreSaveLoadBlock(t *testing.T) { }, { - block: newBlock(header2, commitAtH10), - parts: uncontiguousPartSet, - wantPanic: "only save contiguous blocks", // and incomplete 
and uncontiguous parts + block: newBlock( // New block at height 5 in empty block store is fine + types.Header{Height: 5, ChainID: "block_test", Time: tmtime.Now()}, + makeTestCommit(5, tmtime.Now()), + ), + parts: validPartSet, + seenCommit: makeTestCommit(5, tmtime.Now()), }, { @@ -365,6 +387,92 @@ func TestLoadBlockPart(t *testing.T) { "expecting successful retrieval of previously saved block") } +func TestPruneBlocks(t *testing.T) { + config := cfg.ResetTestRoot("blockchain_reactor_test") + defer os.RemoveAll(config.RootDir) + state, err := sm.LoadStateFromDBOrGenesisFile(dbm.NewMemDB(), config.GenesisFile()) + require.NoError(t, err) + db := dbm.NewMemDB() + bs := NewBlockStore(db) + assert.EqualValues(t, 0, bs.Base()) + assert.EqualValues(t, 0, bs.Height()) + assert.EqualValues(t, 0, bs.Size()) + + // pruning an empty store should error, even when pruning to 0 + _, err = bs.PruneBlocks(1) + require.Error(t, err) + + _, err = bs.PruneBlocks(0) + require.Error(t, err) + + // make more than 1000 blocks, to test batch deletions + for h := int64(1); h <= 1500; h++ { + block := makeBlock(h, state, new(types.Commit)) + partSet := block.MakePartSet(2) + seenCommit := makeTestCommit(h, tmtime.Now()) + bs.SaveBlock(block, partSet, seenCommit) + } + + assert.EqualValues(t, 1, bs.Base()) + assert.EqualValues(t, 1500, bs.Height()) + assert.EqualValues(t, 1500, bs.Size()) + + prunedBlock := bs.LoadBlock(1199) + + // Check that basic pruning works + pruned, err := bs.PruneBlocks(1200) + require.NoError(t, err) + assert.EqualValues(t, 1199, pruned) + assert.EqualValues(t, 1200, bs.Base()) + assert.EqualValues(t, 1500, bs.Height()) + assert.EqualValues(t, 301, bs.Size()) + assert.EqualValues(t, BlockStoreStateJSON{ + Base: 1200, + Height: 1500, + }, LoadBlockStoreStateJSON(db)) + + require.NotNil(t, bs.LoadBlock(1200)) + require.Nil(t, bs.LoadBlock(1199)) + require.Nil(t, bs.LoadBlockByHash(prunedBlock.Hash())) + require.Nil(t, bs.LoadBlockCommit(1199)) + require.Nil(t, 
bs.LoadBlockMeta(1199)) + require.Nil(t, bs.LoadBlockPart(1199, 1)) + + for i := int64(1); i < 1200; i++ { + require.Nil(t, bs.LoadBlock(i)) + } + for i := int64(1200); i <= 1500; i++ { + require.NotNil(t, bs.LoadBlock(i)) + } + + // Pruning below the current base should error + _, err = bs.PruneBlocks(1199) + require.Error(t, err) + + // Pruning to the current base should work + pruned, err = bs.PruneBlocks(1200) + require.NoError(t, err) + assert.EqualValues(t, 0, pruned) + + // Pruning again should work + pruned, err = bs.PruneBlocks(1300) + require.NoError(t, err) + assert.EqualValues(t, 100, pruned) + assert.EqualValues(t, 1300, bs.Base()) + + // Pruning beyond the current height should error + _, err = bs.PruneBlocks(1501) + require.Error(t, err) + + // Pruning to the current height should work + pruned, err = bs.PruneBlocks(1500) + require.NoError(t, err) + assert.EqualValues(t, 200, pruned) + assert.Nil(t, bs.LoadBlock(1499)) + assert.NotNil(t, bs.LoadBlock(1500)) + assert.Nil(t, bs.LoadBlock(1501)) +} + func TestLoadBlockMeta(t *testing.T) { bs, db := freshBlockStore() height := int64(10) diff --git a/test/app/grpc_client.go b/test/app/grpc_client.go index 354443e5c..88e4650ab 100644 --- a/test/app/grpc_client.go +++ b/test/app/grpc_client.go @@ -8,6 +8,7 @@ import ( "context" amino "github.com/tendermint/go-amino" + coregrpc "github.com/tendermint/tendermint/rpc/grpc" ) diff --git a/test/docker/Dockerfile b/test/docker/Dockerfile index fb5458e82..9ba010942 100644 --- a/test/docker/Dockerfile +++ b/test/docker/Dockerfile @@ -1,7 +1,4 @@ -FROM golang:1.13 - -# Add testing deps for curl -RUN echo 'deb http://httpredir.debian.org/debian testing main non-free contrib' >> /etc/apt/sources.list +FROM golang:1.14 # Grab deps (jq, hexdump, xxd, killall) RUN apt-get update && \ diff --git a/tools.mk b/tools.mk index 516fc494e..de722ae89 100644 --- a/tools.mk +++ b/tools.mk @@ -38,14 +38,8 @@ mkfile_dir := $(shell cd $(shell dirname $(mkfile_path)); pwd) # Go 
tools ### -BIN ?= /usr/local/bin -UNAME_S ?= $(shell uname -s) -UNAME_M ?= $(shell uname -m) - TOOLS_DESTDIR ?= $(GOPATH)/bin -BUF_VERSION ?= 0.7.0 - CERTSTRAP = $(TOOLS_DESTDIR)/certstrap PROTOBUF = $(TOOLS_DESTDIR)/protoc GOODMAN = $(TOOLS_DESTDIR)/goodman @@ -77,14 +71,6 @@ $(PROTOBUF): @go get github.com/gogo/protobuf/protoc-gen-gogo@v1.3.1 .PHONY: protobuf -buf: - @echo "Installing buf..." - @curl -sSL \ - "https://github.com/bufbuild/buf/releases/download/v$(BUF_VERSION)/buf-$(UNAME_S)-$(UNAME_M)" \ - -o "$(BIN)/buf" && \ - chmod +x "$(BIN)/buf" -.PHONY: buf - goodman: $(GOODMAN) $(GOODMAN): @echo "Get Goodman" diff --git a/tools/build/.gitignore b/tools/build/.gitignore deleted file mode 100644 index 9974388f1..000000000 --- a/tools/build/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -BUILD -RPMS -SPECS -tmp diff --git a/tools/build/Makefile b/tools/build/Makefile deleted file mode 100644 index 1c2094427..000000000 --- a/tools/build/Makefile +++ /dev/null @@ -1,289 +0,0 @@ -## -# Extra checks, because we do not use autoconf. -## - -requirements_check = true -gpg_check = false -go_min_version = 1.13 -gpg_key = 2122CBE9 - -ifeq ($(requirements_check),true) -ifndef GOPATH -$(error GOPATH not set) -else -go_version := $(shell go version | sed "s/^.* go\([0-9\.]*\) .*$$/\1/" ) -$(info Found go version $(go_version)) -go_version_check := $(shell echo -e "$(go_min_version)\n$(go_version)" | sort -V | head -1) -ifneq ($(go_min_version),$(go_version_check)) -$(error go version go_min_version or above is required) -endif -endif -ifeq ($(gpg_check),true) -gpg_check := $(shell gpg -K | grep '/$(gpg_key) ' | sed 's,^.*/\($(gpg_key)\) .*$$,\1,') -ifneq ($(gpg_check),$(gpg_key)) -$(error GPG key $(gpg_key) not found.) 
-else -$(info GPG key $(gpg_key) found) -endif -ifndef GPG_PASSPHRASE -$(error GPG_PASSPHRASE not set) -endif -endif -endif - -### -# Here comes the real deal -### - -binaries = tendermint basecoind ethermint gaia -build-binaries = build-tendermint build-basecoind build-ethermint build-gaia -package-rpm = package-rpm-tendermint package-rpm-basecoind package-rpm-ethermint package-rpm-gaia -install-rpm = install-rpm-tendermint install-rpm-basecoind install-rpm-ethermint install-rpm-gaia -package-deb = package-deb-tendermint package-deb-basecoind package-deb-ethermint package-deb-gaia -install-deb = install-deb-tendermint install-deb-basecoind install-deb-ethermint install-deb-gaia - -all: $(binaries) -build: $(build-binaries) -package: $(package-rpm) $(package-deb) -install: $(install-rpm) $(install-deb) -$(binaries): %: build-% package-rpm-% package-deb-% - -### -# Build the binaries -### - -git-branch: - $(eval GIT_BRANCH=$(shell echo $${GIT_BRANCH:-master})) - -gopath-setup: - test -d $(GOPATH) || mkdir -p $(GOPATH) - test -d $(GOPATH)/bin || mkdir -p $(GOPATH)/bin - test -d $(GOPATH)/src || mkdir -p $(GOPATH)/src - -build-tendermint: git-branch gopath-setup - @echo "*** Building tendermint" - go get -d -u github.com/tendermint/tendermint/cmd/tendermint - cd $(GOPATH)/src/github.com/tendermint/tendermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/tendermint tools build - cp $(GOPATH)/src/github.com/tendermint/tendermint/build/tendermint $(GOPATH)/bin - @echo "*** Built tendermint" - -build-ethermint: git-branch gopath-setup - @echo "*** Building ethermint" - go get -d -u github.com/tendermint/ethermint/cmd/ethermint - cd $(GOPATH)/src/github.com/tendermint/ethermint && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/tendermint/ethermint build - cp 
$(GOPATH)/src/github.com/tendermint/ethermint/build/ethermint $(GOPATH)/bin - @echo "*** Built ethermint" - -build-gaia: git-branch gopath-setup - @echo "*** Building gaia" - go get -d -u go github.com/cosmos/gaia || echo "Workaround for go downloads." - cd $(GOPATH)/src/github.com/cosmos/gaia && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/gaia install - @echo "*** Built gaia" - -build-basecoind: git-branch gopath-setup - @echo "*** Building basecoind from cosmos-sdk" - go get -d -u github.com/cosmos/cosmos-sdk/examples/basecoin/cmd/basecoind - cd $(GOPATH)/src/github.com/cosmos/cosmos-sdk && git checkout "$(GIT_BRANCH)" && git pull - export PATH=$(GOPATH)/bin:$(PATH) && $(MAKE) -C $(GOPATH)/src/github.com/cosmos/cosmos-sdk tools build - cp $(GOPATH)/src/github.com/cosmos/cosmos-sdk/build/basecoind $(GOPATH)/bin/basecoind - @echo "*** Built basecoind from cosmos-sdk" - -### -# Prepare package files -### - -# set app_version -version-%: - @echo "Checking if binary exists" - test -f $(GOPATH)/bin/$* - @echo "BUILD_NUMBER is $(BUILD_NUMBER)" - test -n "$(BUILD_NUMBER)" - $(eval $*_version=$(shell $(GOPATH)/bin/$* version | head -1 | cut -d- -f1 | sed 's/^\(ethermint:\s*\|\)\(v\|\)//' | tr -d '\t ' )) - -# set build_folder -folder-%: version-% - $(eval build_folder=BUILD/$*-$($*_version)-$(BUILD_NUMBER)) - -# clean up folder structure for package files -prepare-files = rm -rf $(build_folder) && mkdir -p $(build_folder) && cp -r ./$(1)/* $(build_folder) && mkdir -p $(build_folder)/usr/bin && cp $(GOPATH)/bin/$(1) $(build_folder)/usr/bin - -## -## Package customizations for the different applications -## - -prepare-tendermint = -prepare-ethermint = mkdir -p $(build_folder)/etc/ethermint && \ - cp $(GOPATH)/src/github.com/tendermint/ethermint/setup/genesis.json $(build_folder)/etc/ethermint/genesis.json && \ - cp -r 
$(GOPATH)/src/github.com/tendermint/ethermint/setup/keystore $(build_folder)/etc/ethermint -prepare-gaia = -prepare-basecoind = cp $(GOPATH)/bin/basecoind $(build_folder)/usr/bin - -### -# Package the binary for CentOS/RedHat (RPM) and Debian/Ubuntu (DEB) -### - -# Depends on rpmbuild, sorry, this can only be built on CentOS/RedHat machines. -package-rpm-%: folder-% - @echo "*** Packaging RPM $* version $($*_version)" - - $(call prepare-files,$*) - $(call prepare-$*) - - rm -rf $(build_folder)/DEBIAN - mkdir -p $(build_folder)/usr/share/licenses/$* - cp ./LICENSE $(build_folder)/usr/share/licenses/$*/LICENSE - chmod -Rf a+rX,u+w,g-w,o-w $(build_folder) - - mkdir -p {SPECS,tmp} - - ./generate-spec $* spectemplates SPECS - sed -i "s/@VERSION@/$($*_version)/" SPECS/$*.spec - sed -i "s/@BUILD_NUMBER@/$(BUILD_NUMBER)/" SPECS/$*.spec - sed -i "s/@PACKAGE_NAME@/$*/" SPECS/$*.spec - - rpmbuild -bb SPECS/$*.spec --define "_topdir `pwd`" --define "_tmppath `pwd`/tmp" - ./sign RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm "$(gpg_key)" "`which gpg`" - rpm -Kv RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm || echo "rpm returns non-zero exist for some reason. 
($?)" - @echo "*** Packaged RPM $* version $($*_version)" - -package-deb-%: folder-% - @echo "*** Packaging DEB $* version $($*_version)-$(BUILD_NUMBER)" - - $(call prepare-files,$*) - $(call prepare-$*) - - mkdir -p $(build_folder)/usr/share/doc/$* - cp $(build_folder)/DEBIAN/copyright $(build_folder)/usr/share/doc/$* - chmod -Rf a+rX,u+w,g-w,o-w $(build_folder) - - sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/changelog - sed -i "s/@STABILITY@/stable/" $(build_folder)/DEBIAN/changelog - sed -i "s/@DATETIMESTAMP@/`date +%a,\ %d\ %b\ %Y\ %T\ %z`/" $(build_folder)/DEBIAN/changelog - sed -i "s/@VERSION@/$($*_version)-$(BUILD_NUMBER)/" $(build_folder)/DEBIAN/control - - gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.gz - gzip -c $(build_folder)/DEBIAN/changelog > $(build_folder)/usr/share/doc/$*/changelog.Debian.amd64.gz - sed -i "s/@INSTALLEDSIZE@/`du -ks $(build_folder) | cut -f 1`/" $(build_folder)/DEBIAN/control - - cd $(build_folder) && tar --owner=root --group=root -cvJf ../../tmp/data.tar.xz --exclude DEBIAN * - cd $(build_folder)/DEBIAN && tar --owner=root --group=root -cvzf ../../../tmp/control.tar.gz * - echo "2.0" > tmp/debian-binary - - cp ./_gpg tmp/ - cd tmp && sed -i "s/@DATETIMESTAMP@/`date +%a\ %b\ %d\ %T\ %Y`/" _gpg - cd tmp && sed -i "s/@BINMD5@/`md5sum debian-binary | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@BINSHA1@/`sha1sum debian-binary | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@BINSIZE@/`stat -c %s debian-binary | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@CONMD5@/`md5sum control.tar.gz | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@CONSHA1@/`sha1sum control.tar.gz | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@CONSIZE@/`stat -c %s control.tar.gz | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@DATMD5@/`md5sum data.tar.xz | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@DATSHA1@/`sha1sum data.tar.xz | cut -d\ -f1`/" _gpg - cd tmp && sed -i "s/@DATSIZE@/`stat -c %s data.tar.xz | 
cut -d\ -f1`/" _gpg - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --clearsign tmp/_gpg - mv tmp/_gpg.asc tmp/_gpgbuilder - ar r tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder - mv tmp/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb RPMS/ - rm tmp/debian-binary tmp/control.tar.gz tmp/data.tar.xz tmp/_gpgbuilder tmp/_gpg - @echo "*** Packaged DEB $* version $($*_version)-$(BUILD_NUMBER)" - -install-rpm-%: version-% -#Make sure your host has the IAM role to read/write the S3 bucket OR that you set up ~/.boto - @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm to AWS $(DEVOPS_PATH)CentOS repository" - aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)centos/ tmp/s3/ --delete - mkdir -p tmp/s3/7/os/x86_64/Packages - cp RPMS/x86_64/$*-$($*_version)-$(BUILD_NUMBER).x86_64.rpm tmp/s3/7/os/x86_64/Packages - cp ./RPM-GPG-KEY-Tendermint tmp/s3/7/os/x86_64/ - cp ./tendermint.repo tmp/s3/7/os/x86_64/ - rm -f tmp/s3/7/os/x86_64/repodata/*.bz2 tmp/s3/7/os/x86_64/repodata/*.gz tmp/s3/7/os/x86_64/repodata/repomd.xml.asc - createrepo tmp/s3/7/os/x86_64/Packages -u https://tendermint-packages.interblock.io/$(DEVOPS_PATH)centos/7/os/x86_64/Packages -o tmp/s3/7/os/x86_64 --update -S --repo Tendermint --content tendermint --content basecoind --content ethermint - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --detach-sign -a tmp/s3/7/os/x86_64/repodata/repomd.xml - aws s3 sync tmp/s3/ s3://tendermint-packages/$(DEVOPS_PATH)centos/ --delete --acl public-read - @echo "*** Uploaded $* to AWS $(DEVOPS_PATH)CentOS repository" - -install-deb-%: version-% - @echo "*** Uploading $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository" - @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" - test ! 
-f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb - aws s3 sync s3://tendermint-packages/$(DEVOPS_PATH)debian/ tmp/debian-s3/ --delete - @echo "Testing if $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb is already uploaded" - test ! -f tmp/debian-s3/pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb - cp ./tendermint.list tmp/debian-s3/ - mkdir -p tmp/debian-s3/pool tmp/debian-s3/dists/stable/main/binary-amd64 - cp RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb tmp/debian-s3/pool - cp ./Release_amd64 tmp/debian-s3/dists/stable/main/binary-amd64/Release - - #Packages / Packages.gz - - echo > tmp/Package - echo "Filename: pool/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb" >> tmp/Package - echo "MD5sum: `md5sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package - echo "SHA1: `sha1sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package - echo "SHA256: `sha256sum RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package - echo "Size: `stat -c %s RPMS/$*-$($*_version)-$(BUILD_NUMBER)_amd64.deb | cut -d\ -f 1`" >> tmp/Package - cat BUILD/$*-$($*_version)-$(BUILD_NUMBER)/DEBIAN/control >> tmp/Package - - cat tmp/Package >> tmp/debian-s3/dists/stable/main/binary-amd64/Packages - rm -f tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz - gzip -c tmp/debian-s3/dists/stable/main/binary-amd64/Packages > tmp/debian-s3/dists/stable/main/binary-amd64/Packages.gz - rm -f tmp/Package - - #main / Release / InRelease / Release.gpg - - cp ./Release tmp/debian-s3/dists/stable/main/Release - rm -f tmp/debian-s3/dists/stable/main/InRelease - rm -f tmp/debian-s3/dists/stable/main/Release.gpg - - echo "MD5Sum:" >> tmp/debian-s3/dists/stable/main/Release - cd tmp/debian-s3/dists/stable/main && for f in `find . 
-type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - echo "SHA1:" >> tmp/debian-s3/dists/stable/main/Release - cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - echo "SHA256:" >> tmp/debian-s3/dists/stable/main/Release - cd tmp/debian-s3/dists/stable/main && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/main/Release - mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/Release.gpg - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/main/Release - mv tmp/debian-s3/dists/stable/main/Release.asc tmp/debian-s3/dists/stable/main/InRelease - - #stable / Release / InRelease / Release.gpg - - cp ./Release tmp/debian-s3/dists/stable/Release - rm -f tmp/debian-s3/dists/stable/InRelease - rm -f tmp/debian-s3/dists/stable/Release.gpg - - echo "MD5Sum:" >> tmp/debian-s3/dists/stable/Release - cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; md5sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - echo "SHA1:" >> tmp/debian-s3/dists/stable/Release - cd tmp/debian-s3/dists/stable && for f in `find . -type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha1sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - echo "SHA256:" >> tmp/debian-s3/dists/stable/Release - cd tmp/debian-s3/dists/stable && for f in `find . 
-type f | sed 's/^.\///'` ; do test "$$f" == "Release" && continue ; echo -n " " ; sha256sum $$f | sed "s/ / `stat -c %s $$f` /" ; done >> Release - - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA256 -b -a tmp/debian-s3/dists/stable/Release - mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/Release.gpg - gpg --batch --passphrase "$(GPG_PASSPHRASE)" --digest-algo SHA512 --clearsign tmp/debian-s3/dists/stable/Release - mv tmp/debian-s3/dists/stable/Release.asc tmp/debian-s3/dists/stable/InRelease - - aws s3 sync tmp/debian-s3/ s3://tendermint-packages/$(DEVOPS_PATH)debian/ --delete --acl public-read - @echo "*** Uploaded $*-$($*_version)-$(BUILD_NUMBER)_amd64.deb to AWS $(DEVOPS_PATH)Debian repository" - -mostlyclean: - rm -rf {BUILDROOT,SOURCES,SPECS,SRPMS,tmp} - -clean: mostlyclean - rm -rf {BUILD,RPMS} - -distclean: clean - rm -rf $(GOPATH)/src/github.com/tendermint/tendermint - rm -rf $(GOPATH)/src/github.com/cosmos/cosmos-sdk - rm -rf $(GOPATH)/src/github.com/tendermint/ethermint - rm -rf $(GOPATH)/bin/tendermint - rm -rf $(GOPATH)/bin/basecoind - rm -rf $(GOPATH)/bin/ethermint - rm -rf $(GOPATH)/bin/gaia - -.PHONY : clean - diff --git a/tools/build/RPM-GPG-KEY-Tendermint b/tools/build/RPM-GPG-KEY-Tendermint deleted file mode 100644 index e6f200d87..000000000 --- a/tools/build/RPM-GPG-KEY-Tendermint +++ /dev/null @@ -1,19 +0,0 @@ ------BEGIN PGP PUBLIC KEY BLOCK----- -Version: GnuPG v2.0.22 (GNU/Linux) - -mQENBFk97ngBCADaiPQFKJI7zWYdUKqC490DzY9g9LatsWoJErK5LuMXwEnF5i+a -UkygueukA4C5U7L71l5EeOB9rtb6AbkF4IEZsmmp93APec/3Vfbac9xvK4dBdiht -F8SrazPdHeR6AKcZH8ZpG/+mdONvGb/gEgtxVjaeIJFpCbjKLlKEXazh2zamhhth -q+Nn/17QmI3KBiaGqQK5w4kGZ4mZPy6fXMQhW5dDMq9f4anlGIAYi9O53dVxsx2S -5d+NHuGer5Ps0u6WMJi/e+UT2EGwzP6ygOxkIjyhMFuVftabOtSSrRHHetw8UAaI -N/RPn2gSbQtOQ7unzHDXp3/o6/r2nDEErPyJABEBAAG0LkdyZWcgU3phYm8gKFRl -bmRlcm1pbnQpIDxncmVnQHBoaWxvc29iZWFyLmNvbT6JATkEEwECACMFAlk97ngC 
-GwMHCwkIBwMCAQYVCAIJCgsEFgIDAQIeAQIXgAAKCRDIkIHIISLL6bX/CACXTKmO -u5XgvJICH0pHNeVS5/4Om1Rsg1xNmEkGFBP8N2fqn576exbOLgWLSyNHTEyrJNoc -iTeUtod2qqbVGwRgWm1zeiP8NBYiQ9SUbqskIqcPavJNGWIxsCB0p/odoZah8xSj -tGrkoyoxrc+7z2JgKYK8SVSkJXQkzuc5/ZlY85ci5gPKQhlo5YDqGo+4U9n/Ieo5 -nkF8LBalFC2j7A7sQNroEicpulpGhIq3jyUHtadX01z3pNzuX+wfHX9futoet0YS -tG2007WoPGV0whGnoKxmk0JhwzhscC2XNtJl1GZcwqOOlPU9eGtZuPKj/HBAlRtz -4xTOAcklpg8soqRA -=jNDW ------END PGP PUBLIC KEY BLOCK----- diff --git a/tools/build/Release b/tools/build/Release deleted file mode 100644 index 9003d1320..000000000 --- a/tools/build/Release +++ /dev/null @@ -1,7 +0,0 @@ -Origin: Tendermint -Label: Tendermint -Suite: stable -Date: Fri, 16 Jun 2017 19:44:00 UTC -Architectures: amd64 -Components: main -Description: Tendermint repository diff --git a/tools/build/Release_amd64 b/tools/build/Release_amd64 deleted file mode 100644 index 1f2ecbfe2..000000000 --- a/tools/build/Release_amd64 +++ /dev/null @@ -1,5 +0,0 @@ -Archive: stable -Component: main -Origin: Tendermint -Label: Tendermint -Architecture: amd64 diff --git a/tools/build/_gpg b/tools/build/_gpg deleted file mode 100644 index 73742b5d8..000000000 --- a/tools/build/_gpg +++ /dev/null @@ -1,8 +0,0 @@ -Version: 4 -Signer: -Date: @DATETIMESTAMP@ -Role: builder -Files: - @BINMD5@ @BINSHA1@ @BINSIZE@ debian-binary - @CONMD5@ @CONSHA1@ @CONSIZE@ control.tar.gz - @DATMD5@ @DATSHA1@ @DATSIZE@ data.tar.xz diff --git a/tools/build/basecoind/DEBIAN/changelog b/tools/build/basecoind/DEBIAN/changelog deleted file mode 100644 index 260718eaf..000000000 --- a/tools/build/basecoind/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -basecoind (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/cosmos/cosmos-sdk for more information. 
- - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/basecoind/DEBIAN/compat b/tools/build/basecoind/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/basecoind/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/basecoind/DEBIAN/control b/tools/build/basecoind/DEBIAN/control deleted file mode 100644 index c15d49110..000000000 --- a/tools/build/basecoind/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: basecoind -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: basecoind -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: basecoind is a Proof-of-Stake cryptocurrency and framework - Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins. - diff --git a/tools/build/basecoind/DEBIAN/copyright b/tools/build/basecoind/DEBIAN/copyright deleted file mode 100644 index fe449650c..000000000 --- a/tools/build/basecoind/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: basecoind -Source: https://github.com/cosmos/cosmos-sdk - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/build/basecoind/DEBIAN/postinst b/tools/build/basecoind/DEBIAN/postinst deleted file mode 100644 index d7d8f4413..000000000 --- a/tools/build/basecoind/DEBIAN/postinst +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postinst script for basecoind -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown basecoind.basecoind /etc/basecoind - sudo -Hu basecoind basecoind node init --home /etc/basecoind 2B24DEE2364762300168DF19B6C18BCE2D399EA2 - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/postrm b/tools/build/basecoind/DEBIAN/postrm deleted file mode 100644 index b84c9f2a4..000000000 --- a/tools/build/basecoind/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for basecoin -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/preinst b/tools/build/basecoind/DEBIAN/preinst deleted file mode 100644 index 53124c0ce..000000000 --- a/tools/build/basecoind/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for basecoind -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^basecoind:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc basecoind - chmod 755 /etc/basecoind - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/DEBIAN/prerm b/tools/build/basecoind/DEBIAN/prerm deleted file mode 100644 index 18ef42079..000000000 --- a/tools/build/basecoind/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for basecoin -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop basecoind 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset b/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset deleted file mode 100644 index 358334fc3..000000000 --- a/tools/build/basecoind/etc/systemd/system-preset/50-basecoind.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable basecoind.service - diff --git a/tools/build/basecoind/etc/systemd/system/basecoind.service b/tools/build/basecoind/etc/systemd/system/basecoind.service deleted file mode 100644 index 68b46d84f..000000000 --- a/tools/build/basecoind/etc/systemd/system/basecoind.service +++ /dev/null @@ -1,18 +0,0 @@ -[Unit] -Description=Basecoind -Requires=network-online.target -After=network-online.target - -[Service] -Environment="BCHOME=/etc/basecoind" -Restart=on-failure -User=basecoind -Group=basecoind -PermissionsStartOnly=true -ExecStart=/usr/bin/basecoind start -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/basecoind/usr/share/basecoind/key.json b/tools/build/basecoind/usr/share/basecoind/key.json deleted file mode 
100644 index bdefe8fd4..000000000 --- a/tools/build/basecoind/usr/share/basecoind/key.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1B1BE55F969F54064628A63B9559E7C21C925165", - "priv_key": { - "type": "ed25519", - "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - } -} - diff --git a/tools/build/basecoind/usr/share/basecoind/key2.json b/tools/build/basecoind/usr/share/basecoind/key2.json deleted file mode 100644 index ddfc6809b..000000000 --- a/tools/build/basecoind/usr/share/basecoind/key2.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", - "priv_key": { - "type": "ed25519", - "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - }, - "pub_key": { - "type": "ed25519", - "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - } -} - diff --git a/tools/build/ethermint/DEBIAN/changelog b/tools/build/ethermint/DEBIAN/changelog deleted file mode 100644 index 76a1fb154..000000000 --- a/tools/build/ethermint/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -ethermint (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/tendermint for more information. 
- - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/ethermint/DEBIAN/compat b/tools/build/ethermint/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/ethermint/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/ethermint/DEBIAN/control b/tools/build/ethermint/DEBIAN/control deleted file mode 100644 index 2d8b3b002..000000000 --- a/tools/build/ethermint/DEBIAN/control +++ /dev/null @@ -1,15 +0,0 @@ -Source: ethermint -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Depends: tendermint (>=0.11.0) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: ethermint -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub - Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners. - diff --git a/tools/build/ethermint/DEBIAN/copyright b/tools/build/ethermint/DEBIAN/copyright deleted file mode 100644 index 6d1bab01b..000000000 --- a/tools/build/ethermint/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: ethermint -Source: https://github.com/tendermint/ethermint - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/build/ethermint/DEBIAN/postinst b/tools/build/ethermint/DEBIAN/postinst deleted file mode 100644 index 439fdc395..000000000 --- a/tools/build/ethermint/DEBIAN/postinst +++ /dev/null @@ -1,46 +0,0 @@ -#!/bin/sh -# postinst script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown ethermint.ethermint /etc/ethermint - chown ethermint.ethermint /etc/ethermint/genesis.json - chown ethermint.ethermint /etc/ethermint/keystore - chown ethermint.ethermint /etc/ethermint/keystore/UTC--2016-10-21T22-30-03.071787745Z--7eff122b94897ea5b0e2a9abf47b86337fafebdc - - sudo -Hu ethermint /usr/bin/ethermint --datadir /etc/ethermint init /etc/ethermint/genesis.json - sudo -Hu ethermint tendermint init --home /etc/ethermint - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/postrm b/tools/build/ethermint/DEBIAN/postrm deleted file mode 100644 index f1d9d6afc..000000000 --- a/tools/build/ethermint/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/preinst b/tools/build/ethermint/DEBIAN/preinst deleted file mode 100644 index 829112e6b..000000000 --- a/tools/build/ethermint/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^ethermint:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc ethermint - chmod 755 /etc/ethermint - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/DEBIAN/prerm b/tools/build/ethermint/DEBIAN/prerm deleted file mode 100644 index 00a775cef..000000000 --- a/tools/build/ethermint/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for ethermint -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop ethermint 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset b/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset deleted file mode 100644 index 836a28c30..000000000 --- a/tools/build/ethermint/etc/systemd/system-preset/50-ethermint.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable ethermint.service - diff --git a/tools/build/ethermint/etc/systemd/system/ethermint.service b/tools/build/ethermint/etc/systemd/system/ethermint.service deleted file mode 100644 index f71a074ea..000000000 --- a/tools/build/ethermint/etc/systemd/system/ethermint.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Ethermint -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=ethermint -Group=ethermint -PermissionsStartOnly=true -ExecStart=/usr/bin/ethermint --datadir /etc/ethermint -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/gaia/DEBIAN/changelog b/tools/build/gaia/DEBIAN/changelog deleted file mode 100644 index eca5fbc3d..000000000 --- 
a/tools/build/gaia/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -gaia (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/basecoin for more information. - - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/gaia/DEBIAN/compat b/tools/build/gaia/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/gaia/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/gaia/DEBIAN/control b/tools/build/gaia/DEBIAN/control deleted file mode 100644 index 55d1cd5dd..000000000 --- a/tools/build/gaia/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: gaia -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://cosmos.network -Package: gaia -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: gaia - Tendermint Cosmos delegation game chain - Gaia description comes later. - diff --git a/tools/build/gaia/DEBIAN/copyright b/tools/build/gaia/DEBIAN/copyright deleted file mode 100644 index ffc230134..000000000 --- a/tools/build/gaia/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: gaia -Source: https://github.com/cosmos/gaia - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . 
- On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/build/gaia/DEBIAN/postinst b/tools/build/gaia/DEBIAN/postinst deleted file mode 100644 index 427b7c493..000000000 --- a/tools/build/gaia/DEBIAN/postinst +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postinst script for gaia -# - -set -e - -# summary of how this script can be called: -# * `configure' -# * `abort-upgrade' -# * `abort-remove' `in-favour' -# -# * `abort-remove' -# * `abort-deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - configure) - chown gaia.gaia /etc/gaia - sudo -Hu gaia gaia node init --home /etc/gaia 2B24DEE2364762300168DF19B6C18BCE2D399EA2 - systemctl daemon-reload - ;; - - abort-upgrade|abort-remove|abort-deconfigure) - ;; - - *) - echo "postinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/postrm b/tools/build/gaia/DEBIAN/postrm deleted file mode 100644 index da526ec30..000000000 --- a/tools/build/gaia/DEBIAN/postrm +++ /dev/null @@ -1,41 +0,0 @@ -#!/bin/sh -# postrm script for gaia -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `purge' -# * `upgrade' -# * `failed-upgrade' -# * `abort-install' -# * `abort-install' -# * `abort-upgrade' -# * `disappear' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - upgrade|failed-upgrade|abort-upgrade) - systemctl daemon-reload - ;; - - purge|remove|abort-install|disappear) - systemctl daemon-reload - ;; - - *) - echo "postrm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/preinst b/tools/build/gaia/DEBIAN/preinst deleted file mode 100644 index 382fa419f..000000000 --- a/tools/build/gaia/DEBIAN/preinst +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# preinst script for gaia -# - -set -e - -# summary of how this script can be called: -# * `install' -# * `install' -# * `upgrade' -# * `abort-upgrade' -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - install|upgrade) - if ! grep -q '^gaia:' /etc/passwd ; then - useradd -k /dev/null -r -m -b /etc gaia - chmod 755 /etc/gaia - fi - ;; - - abort-upgrade) - ;; - - *) - echo "preinst called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. 
- -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/DEBIAN/prerm b/tools/build/gaia/DEBIAN/prerm deleted file mode 100644 index 165c1ab6a..000000000 --- a/tools/build/gaia/DEBIAN/prerm +++ /dev/null @@ -1,38 +0,0 @@ -#!/bin/sh -# prerm script for gaia -# - -set -e - -# summary of how this script can be called: -# * `remove' -# * `upgrade' -# * `failed-upgrade' -# * `remove' `in-favour' -# * `deconfigure' `in-favour' -# `removing' -# -# for details, see https://www.debian.org/doc/debian-policy/ or -# the debian-policy package - - -case "$1" in - remove|upgrade|deconfigure) - systemctl stop gaia 2> /dev/null || : - ;; - - failed-upgrade) - ;; - - *) - echo "prerm called with unknown argument \`$1'" >&2 - exit 1 - ;; -esac - -# dh_installdeb will replace this with shell code automatically -# generated by other debhelper scripts. - -#DEBHELPER# - -exit 0 diff --git a/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset b/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset deleted file mode 100644 index dfbf0bc06..000000000 --- a/tools/build/gaia/etc/systemd/system-preset/50-gaia.preset +++ /dev/null @@ -1,2 +0,0 @@ -disable gaia.service - diff --git a/tools/build/gaia/etc/systemd/system/gaia.service b/tools/build/gaia/etc/systemd/system/gaia.service deleted file mode 100644 index 372fe9343..000000000 --- a/tools/build/gaia/etc/systemd/system/gaia.service +++ /dev/null @@ -1,17 +0,0 @@ -[Unit] -Description=Gaia -Requires=network-online.target -After=network-online.target - -[Service] -Restart=on-failure -User=gaia -Group=gaia -PermissionsStartOnly=true -ExecStart=/usr/bin/gaia node start --home=/etc/gaia -ExecReload=/bin/kill -HUP $MAINPID -KillSignal=SIGTERM - -[Install] -WantedBy=multi-user.target - diff --git a/tools/build/gaia/usr/share/gaia/key.json b/tools/build/gaia/usr/share/gaia/key.json deleted file mode 100644 index bdefe8fd4..000000000 --- a/tools/build/gaia/usr/share/gaia/key.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": 
"1B1BE55F969F54064628A63B9559E7C21C925165", - "priv_key": { - "type": "ed25519", - "data": "C70D6934B4F55F1B7BC33B56B9CA8A2061384AFC19E91E44B40C4BBA182953D1619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - }, - "pub_key": { - "type": "ed25519", - "data": "619D3678599971ED29C7529DDD4DA537B97129893598A17C82E3AC9A8BA95279" - } -} - diff --git a/tools/build/gaia/usr/share/gaia/key2.json b/tools/build/gaia/usr/share/gaia/key2.json deleted file mode 100644 index ddfc6809b..000000000 --- a/tools/build/gaia/usr/share/gaia/key2.json +++ /dev/null @@ -1,12 +0,0 @@ -{ - "address": "1DA7C74F9C219229FD54CC9F7386D5A3839F0090", - "priv_key": { - "type": "ed25519", - "data": "34BAE9E65CE8245FAD035A0E3EED9401BDE8785FFB3199ACCF8F5B5DDF7486A8352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - }, - "pub_key": { - "type": "ed25519", - "data": "352195DA90CB0B90C24295B90AEBA25A5A71BC61BAB2FE2387241D439698B7B8" - } -} - diff --git a/tools/build/generate-spec b/tools/build/generate-spec deleted file mode 100755 index 4ca60a1d4..000000000 --- a/tools/build/generate-spec +++ /dev/null @@ -1,36 +0,0 @@ -#!/bin/bash - -if [ $# -ne 3 ]; then - echo "Usage: $0 " - exit 1 -fi - -app=$1 -src=$2 -dst=$3 - -# Find spectemplate -if [ ! -f "$src/$app.spec" ]; then - if [ ! -f "$src/app-template.spec" ]; then - echo "Source template not found." 
- exit 1 - else - srcfile="$src/app-template.spec" - fi -else - srcfile="$src/$app.spec" -fi - -# Copy spectemplate to SPECS -cp "$srcfile" "$dst/$app.spec" - -# Apply any variables defined in .data -if [ -f "$src/$app.data" ]; then - srcdata="$src/$app.data" - source "$srcdata" - for var in `grep -v -e ^# -e ^\s*$ "$srcdata" | grep = | sed 's/\s*=.*$//'` - do - sed -i "s\\@${var}@\\${!var}\\g" "$dst/$app.spec" - done -fi - diff --git a/tools/build/sign b/tools/build/sign deleted file mode 100755 index 0371b5d4b..000000000 --- a/tools/build/sign +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/expect -f -set timeout 3 -set PACKAGE [lindex $argv 0] -set GPG_NAME [lindex $argv 1] -set GPG_PATH [lindex $argv 2] -set GPG_PASSPHRASE $env(GPG_PASSPHRASE) - -if {[llength $argv] == 0} { - send_user "Usage: ./sign \n" - exit 1 -} - -send_user "\nSigning $PACKAGE\n" -spawn rpmsign --resign $PACKAGE --define "_signature gpg" --define "_gpg_name $GPG_NAME" --define "_gpgbin $GPG_PATH" -expect { - timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } - "Enter pass phrase:" -} -send "$GPG_PASSPHRASE\r" -expect { - timeout { send_user "\nTimeout signing $PACKAGE\n"; exit 1 } - "Pass phrase is good." -} -interact -sleep 3 - diff --git a/tools/build/spectemplates/app-template.spec b/tools/build/spectemplates/app-template.spec deleted file mode 100644 index 6cb8145bb..000000000 --- a/tools/build/spectemplates/app-template.spec +++ /dev/null @@ -1,55 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: @PACKAGE_NAME@ -Summary: @PACKAGE_SUMMARY@ -License: Apache 2.0 -URL: @PACKAGE_URL@ -Packager: Greg Szabo -@PACKAGE_ADDITIONAL_HEADER@ - -%description -@PACKAGE_DESCRIPTION@ - -%pre -if ! 
%{__grep} -q '^%{name}:' /etc/passwd ; then - useradd -r -b %{_sysconfdir} %{name} - mkdir -p %{_sysconfdir}/%{name} - chmod 755 %{_sysconfdir}/%{name} - chown %{name}.%{name} %{_sysconfdir}/%{name} -fi - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. - -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%post -sudo -Hu %{name} %{name} node init --home %{_sysconfdir}/%{name} 2B24DEE2364762300168DF19B6C18BCE2D399EA2 -systemctl daemon-reload - -%preun -systemctl stop %{name} 2> /dev/null || : - -%postun -systemctl daemon-reload - -%files -%ghost %attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} -%{_bindir}/* -%{_sysconfdir}/systemd/system/* -%{_sysconfdir}/systemd/system-preset/* -%dir %{_datadir}/%{name} -%{_datadir}/%{name}/* -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/spectemplates/basecoind.data b/tools/build/spectemplates/basecoind.data deleted file mode 100644 index 36b172ecf..000000000 --- a/tools/build/spectemplates/basecoind.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="basecoind is a Proof-of-Stake cryptocurrency and framework" -PACKAGE_URL="https://cosmos.network/" -PACKAGE_ADDITIONAL_HEADER="Provides: basecoind" -PACKAGE_DESCRIPTION="Basecoind is an ABCI application designed to be used with the Tendermint consensus engine to form a Proof-of-Stake cryptocurrency. It also provides a general purpose framework for extending the feature-set of the cryptocurrency by implementing plugins." 
- diff --git a/tools/build/spectemplates/ethermint.data b/tools/build/spectemplates/ethermint.data deleted file mode 100644 index e9d403db7..000000000 --- a/tools/build/spectemplates/ethermint.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="ethermint enables ethereum as an ABCI application on tendermint and the COSMOS hub" -PACKAGE_URL="https://tendermint.com/" -PACKAGE_ADDITIONAL_HEADER="Provides: ethermint" -PACKAGE_DESCRIPTION="Ethermint enables ethereum to run as an ABCI application on tendermint and the COSMOS hub. This application allows you to get all the benefits of ethereum without having to run your own miners." - diff --git a/tools/build/spectemplates/ethermint.spec b/tools/build/spectemplates/ethermint.spec deleted file mode 100644 index fc443e35b..000000000 --- a/tools/build/spectemplates/ethermint.spec +++ /dev/null @@ -1,60 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: @PACKAGE_NAME@ -Summary: @PACKAGE_SUMMARY@ -License: Apache 2.0 -URL: @PACKAGE_URL@ -Packager: Greg Szabo -Requires: tendermint >= 0.11.0 -@PACKAGE_ADDITIONAL_HEADER@ - -%description -@PACKAGE_DESCRIPTION@ - -%pre -if ! %{__grep} -q '^%{name}:' /etc/passwd ; then - useradd -r -b %{_sysconfdir} %{name} - mkdir -p %{_sysconfdir}/%{name} - chmod 755 %{_sysconfdir}/%{name} - chown %{name}.%{name} %{_sysconfdir}/%{name} -fi - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. 
- -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%post -sudo -Hu %{name} tendermint init --home %{_sysconfdir}/%{name} -sudo -Hu %{name} %{name} --datadir %{_sysconfdir}/%{name} init %{_sysconfdir}/%{name}/genesis.json - -systemctl daemon-reload - -%preun -systemctl stop %{name} 2> /dev/null || : -systemctl stop %{name}-service 2> /dev/null || : - -%postun -systemctl daemon-reload - -%files -%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name} -%config(noreplace) %attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/genesis.json -%attr(0755, %{name}, %{name}) %dir %{_sysconfdir}/%{name}/keystore -%attr(0644, %{name}, %{name}) %{_sysconfdir}/%{name}/keystore/* -%{_bindir}/* -%{_sysconfdir}/systemd/system/* -%{_sysconfdir}/systemd/system-preset/* -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/spectemplates/gaia.data b/tools/build/spectemplates/gaia.data deleted file mode 100644 index 7152b1b51..000000000 --- a/tools/build/spectemplates/gaia.data +++ /dev/null @@ -1,5 +0,0 @@ -PACKAGE_SUMMARY="gaia - Tendermint Cosmos delegation game chain" -PACKAGE_URL="https://cosmos.network/" -PACKAGE_ADDITIONAL_HEADER="" -PACKAGE_DESCRIPTION="Gaia description comes later." - diff --git a/tools/build/spectemplates/tendermint.spec b/tools/build/spectemplates/tendermint.spec deleted file mode 100644 index 68902a170..000000000 --- a/tools/build/spectemplates/tendermint.spec +++ /dev/null @@ -1,31 +0,0 @@ -Version: @VERSION@ -Release: @BUILD_NUMBER@ - -%define __spec_install_post %{nil} -%define debug_package %{nil} -%define __os_install_post %{nil} - -Name: tendermint -Summary: securely and consistently replicate an application on many machines -License: Apache 2.0 -URL: https://tendermint.com/ -Packager: Greg Szabo - -%description -Tendermint is software for securely and consistently replicating an application on many machines. 
By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. - -%prep -# Nothing to do here. - It is done in the Makefile. - -%build -# Nothing to do here. - -%install -cd %{name}-%{version}-%{release} -%{__cp} -a * %{buildroot} - -%files -%{_bindir}/tendermint -%dir %{_defaultlicensedir}/%{name} -%doc %{_defaultlicensedir}/%{name}/LICENSE - diff --git a/tools/build/tendermint.list b/tools/build/tendermint.list deleted file mode 100644 index bba521af5..000000000 --- a/tools/build/tendermint.list +++ /dev/null @@ -1 +0,0 @@ -deb http://tendermint-packages.s3-website-us-west-1.amazonaws.com/debian stable main diff --git a/tools/build/tendermint.repo b/tools/build/tendermint.repo deleted file mode 100644 index 439f98ecb..000000000 --- a/tools/build/tendermint.repo +++ /dev/null @@ -1,12 +0,0 @@ -#This is the .repo file for the Tendermint CentOS repositories. -#Although it has only been tested under CentOS 7, it should work under Fedora and RedHat 7 too. -#Currently only 64-bit packages are built. - -[tendermint] -name=Tendermint stable releases repository -baseurl=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64 -gpgcheck=1 -gpgkey=https://do9rmxapsag1v.cloudfront.net/centos/7/os/x86_64/RPM-GPG-KEY-Tendermint -enabled=1 -#sslverify = 1 - diff --git a/tools/build/tendermint/DEBIAN/changelog b/tools/build/tendermint/DEBIAN/changelog deleted file mode 100644 index 4b016f845..000000000 --- a/tools/build/tendermint/DEBIAN/changelog +++ /dev/null @@ -1,6 +0,0 @@ -tendermint (@VERSION@) @STABILITY@; urgency=medium - - * Automatic build. See https://github.com/tendermint/tendermint for more information. 
- - -- Greg Szabo @DATETIMESTAMP@ - diff --git a/tools/build/tendermint/DEBIAN/compat b/tools/build/tendermint/DEBIAN/compat deleted file mode 100644 index ec635144f..000000000 --- a/tools/build/tendermint/DEBIAN/compat +++ /dev/null @@ -1 +0,0 @@ -9 diff --git a/tools/build/tendermint/DEBIAN/control b/tools/build/tendermint/DEBIAN/control deleted file mode 100644 index d9da17dd1..000000000 --- a/tools/build/tendermint/DEBIAN/control +++ /dev/null @@ -1,14 +0,0 @@ -Source: tendermint -Section: net -Priority: optional -Maintainer: Greg Szabo -Build-Depends: debhelper (>=9) -Standards-Version: 3.9.6 -Homepage: https://tendermint.com -Package: tendermint -Architecture: amd64 -Version: @VERSION@ -Installed-Size: @INSTALLEDSIZE@ -Description: securely and consistently replicate an application on many machines - Tendermint is software for securely and consistently replicating an application on many machines. By securely, we mean that Tendermint works even if up to 1/3 of machines fail in arbitrary ways. By consistently, we mean that every non-faulty machine sees the same transaction log and computes the same state. - diff --git a/tools/build/tendermint/DEBIAN/copyright b/tools/build/tendermint/DEBIAN/copyright deleted file mode 100644 index 15ee960dd..000000000 --- a/tools/build/tendermint/DEBIAN/copyright +++ /dev/null @@ -1,21 +0,0 @@ -Format: https://www.debian.org/doc/packaging-manuals/copyright-format/1.0/ -Upstream-Name: tendermint -Source: https://github.com/tendermint/tendermint - -Files: * -Copyright: 2017 All In Bits, Inc. -License: Apache-2.0 - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - . - http://www.apache.org/licenses/LICENSE-2.0 - . 
- Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - . - On Debian systems, the full text of the Apache License 2.0 can be found - in the file `/usr/share/common-licenses/Apache-2.0'. diff --git a/tools/tm-signer-harness/internal/test_harness.go b/tools/tm-signer-harness/internal/test_harness.go index c489a2fd4..f9d48fdcb 100644 --- a/tools/tm-signer-harness/internal/test_harness.go +++ b/tools/tm-signer-harness/internal/test_harness.go @@ -190,9 +190,17 @@ func (th *TestHarness) Run() { // local Tendermint version. func (th *TestHarness) TestPublicKey() error { th.logger.Info("TEST: Public key of remote signer") - th.logger.Info("Local", "pubKey", th.fpv.GetPubKey()) - th.logger.Info("Remote", "pubKey", th.signerClient.GetPubKey()) - if th.fpv.GetPubKey() != th.signerClient.GetPubKey() { + fpvk, err := th.fpv.GetPubKey() + if err != nil { + return err + } + th.logger.Info("Local", "pubKey", fpvk) + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } + th.logger.Info("Remote", "pubKey", sck) + if fpvk != sck { th.logger.Error("FAILED: Local and remote public keys do not match") return newTestHarnessError(ErrTestPublicKeyFailed, nil, "") } @@ -230,8 +238,12 @@ func (th *TestHarness) TestSignProposal() error { th.logger.Error("FAILED: Signed proposal is invalid", "err", err) return newTestHarnessError(ErrTestSignProposalFailed, err, "") } + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } // now validate the signature on the proposal - if th.signerClient.GetPubKey().VerifyBytes(propBytes, prop.Signature) { + if sck.VerifyBytes(propBytes, prop.Signature) { th.logger.Info("Successfully validated proposal signature") } else { th.logger.Error("FAILED: Proposal 
signature validation failed") @@ -274,8 +286,13 @@ func (th *TestHarness) TestSignVote() error { th.logger.Error("FAILED: Signed vote is invalid", "err", err) return newTestHarnessError(ErrTestSignVoteFailed, err, fmt.Sprintf("voteType=%d", voteType)) } + sck, err := th.signerClient.GetPubKey() + if err != nil { + return err + } + // now validate the signature on the proposal - if th.signerClient.GetPubKey().VerifyBytes(voteBytes, vote.Signature) { + if sck.VerifyBytes(voteBytes, vote.Signature) { th.logger.Info("Successfully validated vote signature", "type", voteType) } else { th.logger.Error("FAILED: Vote signature validation failed", "type", voteType) diff --git a/types/block.go b/types/block.go index b06730fb0..dfbbf461b 100644 --- a/types/block.go +++ b/types/block.go @@ -105,11 +105,11 @@ func (b *Block) ValidateBasic() error { // Basic validation of hashes related to application data. // Will validate fully against state in state#ValidateBlock. - if err := ValidateHash(b.ValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.ValidatorsHash: %v", err) + if err := ValidateHash(b.VotersHash); err != nil { + return fmt.Errorf("wrong Header.VotersHash: %v", err) } - if err := ValidateHash(b.NextValidatorsHash); err != nil { - return fmt.Errorf("wrong Header.NextValidatorsHash: %v", err) + if err := ValidateHash(b.NextVotersHash); err != nil { + return fmt.Errorf("wrong Header.NextVotersHash: %v", err) } if err := ValidateHash(b.ConsensusHash); err != nil { return fmt.Errorf("wrong Header.ConsensusHash: %v", err) @@ -336,10 +336,10 @@ type Header struct { DataHash tmbytes.HexBytes `json:"data_hash"` // transactions // hashes from the app output from the prev block - ValidatorsHash tmbytes.HexBytes `json:"validators_hash"` // validators for the current block - NextValidatorsHash tmbytes.HexBytes `json:"next_validators_hash"` // validators for the next block - ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` // consensus params for current block - 
AppHash tmbytes.HexBytes `json:"app_hash"` // state after txs from the previous block + VotersHash tmbytes.HexBytes `json:"voters_hash"` // voters for the current block + NextVotersHash tmbytes.HexBytes `json:"next_voters_hash"` // voters for the next block + ConsensusHash tmbytes.HexBytes `json:"consensus_hash"` // consensus params for current block + AppHash tmbytes.HexBytes `json:"app_hash"` // state after txs from the previous block // root hash of all results from the txs from the previous block LastResultsHash tmbytes.HexBytes `json:"last_results_hash"` @@ -357,7 +357,7 @@ type Header struct { func (h *Header) Populate( version version.Consensus, chainID string, timestamp time.Time, lastBlockID BlockID, - valHash, nextValHash []byte, + votersHash, nextVotersHash []byte, consensusHash, appHash, lastResultsHash []byte, proposerAddress Address, round int, @@ -367,8 +367,8 @@ func (h *Header) Populate( h.ChainID = chainID h.Time = timestamp h.LastBlockID = lastBlockID - h.ValidatorsHash = valHash - h.NextValidatorsHash = nextValHash + h.VotersHash = votersHash + h.NextVotersHash = nextVotersHash h.ConsensusHash = consensusHash h.AppHash = appHash h.LastResultsHash = lastResultsHash @@ -384,7 +384,7 @@ func (h *Header) Populate( // since a Header is not valid unless there is // a ValidatorsHash (corresponding to the validator set). 
func (h *Header) Hash() tmbytes.HexBytes { - if h == nil || len(h.ValidatorsHash) == 0 { + if h == nil || len(h.VotersHash) == 0 { return nil } return merkle.SimpleHashFromByteSlices([][]byte{ @@ -395,8 +395,8 @@ func (h *Header) Hash() tmbytes.HexBytes { cdcEncode(h.LastBlockID), cdcEncode(h.LastCommitHash), cdcEncode(h.DataHash), - cdcEncode(h.ValidatorsHash), - cdcEncode(h.NextValidatorsHash), + cdcEncode(h.VotersHash), + cdcEncode(h.NextVotersHash), cdcEncode(h.ConsensusHash), cdcEncode(h.AppHash), cdcEncode(h.LastResultsHash), @@ -438,8 +438,8 @@ func (h *Header) StringIndented(indent string) string { indent, h.LastBlockID, indent, h.LastCommitHash, indent, h.DataHash, - indent, h.ValidatorsHash, - indent, h.NextValidatorsHash, + indent, h.VotersHash, + indent, h.NextVotersHash, indent, h.AppHash, indent, h.ConsensusHash, indent, h.LastResultsHash, @@ -571,9 +571,9 @@ func (cs CommitSig) ValidateBasic() error { // NOTE: Commit is empty for height 1, but never nil. type Commit struct { // NOTE: The signatures are in order of address to preserve the bonded - // ValidatorSet order. + // VoterSet order. // Any peer with a block can gossip signatures by index with a peer without - // recalculating the active ValidatorSet. + // recalculating the active VoterSet. Height int64 `json:"height"` Round int `json:"round"` BlockID BlockID `json:"block_id"` @@ -599,8 +599,8 @@ func NewCommit(height int64, round int, blockID BlockID, commitSigs []CommitSig) // CommitToVoteSet constructs a VoteSet from the Commit and validator set. // Panics if signatures from the commit can't be added to the voteset. // Inverse of VoteSet.MakeCommit(). 
-func CommitToVoteSet(chainID string, commit *Commit, vals *ValidatorSet) *VoteSet { - voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, vals) +func CommitToVoteSet(chainID string, commit *Commit, voters *VoterSet) *VoteSet { + voteSet := NewVoteSet(chainID, commit.Height, commit.Round, PrecommitType, voters) for idx, commitSig := range commit.Signatures { if commitSig.Absent() { continue // OK, some precommits can be missing. diff --git a/types/block_test.go b/types/block_test.go index 11725673a..4295a9de0 100644 --- a/types/block_test.go +++ b/types/block_test.go @@ -35,11 +35,11 @@ func TestBlockAddEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, valSet.Voters[0].Address) evList := []Evidence{ev} block := MakeBlock(h, txs, commit, evList) @@ -55,11 +55,11 @@ func TestBlockValidateBasic(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, valSet, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} testCases := []struct { @@ -69,7 +69,7 @@ func TestBlockValidateBasic(t *testing.T) { }{ {"Make Block", func(blk *Block) {}, false}, {"Make Block w/ proposer Addr", func(blk *Block) { - blk.ProposerAddress = SelectProposer(valSet, []byte{}, blk.Height, 0).Address + blk.ProposerAddress = valSet.SelectProposer([]byte{}, blk.Height, 
0).Address }, false}, {"Negative Height", func(blk *Block) { blk.Height = -1 }, true}, {"Remove 1/2 the commits", func(blk *Block) { @@ -93,7 +93,7 @@ func TestBlockValidateBasic(t *testing.T) { i := i t.Run(tc.testName, func(t *testing.T) { block := MakeBlock(h, txs, commit, evList) - block.ProposerAddress = SelectProposer(valSet, []byte{}, block.Height, 0).Address + block.ProposerAddress = valSet.SelectProposer([]byte{}, block.Height, 0).Address tc.malleateBlock(block) err = block.ValidateBasic() assert.Equal(t, tc.expErr, err != nil, "#%d: %v", i, err) @@ -120,11 +120,11 @@ func TestBlockMakePartSetWithEvidence(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} partSet := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList).MakePartSet(512) @@ -137,15 +137,15 @@ func TestBlockHashesTo(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, voterSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) - ev := NewMockEvidence(h, time.Now(), 0, valSet.Validators[0].Address) + ev := NewMockEvidence(h, time.Now(), 0, voterSet.Voters[0].Address) evList := []Evidence{ev} block := MakeBlock(h, []Tx{Tx("Hello World")}, commit, evList) - block.ValidatorsHash = valSet.Hash() + block.VotersHash = voterSet.Hash() assert.False(t, block.HashesTo([]byte{})) assert.False(t, block.HashesTo([]byte("something else"))) assert.True(t, block.HashesTo(block.Hash())) @@ -210,7 +210,7 @@ func 
TestNilDataHashDoesntCrash(t *testing.T) { func TestCommit(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) require.NoError(t, err) @@ -256,39 +256,39 @@ func TestHeaderHash(t *testing.T) { expectHash bytes.HexBytes }{ {"Generates expected hash", &Header{ - Version: version.Consensus{Block: 1, App: 2}, - ChainID: "chainId", - Height: 3, - Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), - Round: 1, - Proof: tmhash.Sum([]byte("proof")), - }, hexBytesFromString("A607E71253D996B2D75CC98AEC7FE6363598F6ED37A501B427DBD3A7781FBE15")}, + Version: version.Consensus{Block: 1, App: 2}, + ChainID: "chainId", + Height: 3, + Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), + LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: tmhash.Sum([]byte("voters_hash")), + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + 
ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Round: 1, + Proof: tmhash.Sum([]byte("proof")), + }, hexBytesFromString("0ECEA9AA5613ECD1673C223FA92A4651727C3DD7AF61E2C5FA979EEDBCC05F37")}, {"nil header yields nil", nil, nil}, - {"nil ValidatorsHash yields nil", &Header{ - Version: version.Consensus{Block: 1, App: 2}, - ChainID: "chainId", - Height: 3, - Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), - LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: nil, - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + {"nil VotersHash yields nil", &Header{ + Version: version.Consensus{Block: 1, App: 2}, + ChainID: "chainId", + Height: 3, + Time: time.Date(2019, 10, 13, 16, 14, 44, 0, time.UTC), + LastBlockID: makeBlockID(make([]byte, tmhash.Size), 6, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: nil, + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), }, nil}, } for _, tc := range testCases { @@ -329,20 +329,20 @@ func TestMaxHeaderBytes(t *testing.T) { timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, - ChainID: 
maxChainID, - Height: math.MaxInt64, - Time: timestamp, - LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), - LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), - DataHash: tmhash.Sum([]byte("data_hash")), - ValidatorsHash: tmhash.Sum([]byte("validators_hash")), - NextValidatorsHash: tmhash.Sum([]byte("next_validators_hash")), - ConsensusHash: tmhash.Sum([]byte("consensus_hash")), - AppHash: tmhash.Sum([]byte("app_hash")), - LastResultsHash: tmhash.Sum([]byte("last_results_hash")), - EvidenceHash: tmhash.Sum([]byte("evidence_hash")), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: maxChainID, + Height: math.MaxInt64, + Time: timestamp, + LastBlockID: makeBlockID(make([]byte, tmhash.Size), math.MaxInt64, make([]byte, tmhash.Size)), + LastCommitHash: tmhash.Sum([]byte("last_commit_hash")), + DataHash: tmhash.Sum([]byte("data_hash")), + VotersHash: tmhash.Sum([]byte("voters_hash")), + NextVotersHash: tmhash.Sum([]byte("next_voters_hash")), + ConsensusHash: tmhash.Sum([]byte("consensus_hash")), + AppHash: tmhash.Sum([]byte("app_hash")), + LastResultsHash: tmhash.Sum([]byte("last_results_hash")), + EvidenceHash: tmhash.Sum([]byte("evidence_hash")), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } bz, err := cdc.MarshalBinaryLengthPrefixed(h) @@ -354,7 +354,7 @@ func TestMaxHeaderBytes(t *testing.T) { func randCommit(now time.Time) *Commit { lastID := makeBlockIDRandom() h := int64(3) - voteSet, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, _, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, now) if err != nil { panic(err) @@ -433,7 +433,7 @@ func TestCommitToVoteSet(t *testing.T) { lastID := makeBlockIDRandom() h := int64(3) - voteSet, valSet, vals := randVoteSet(h-1, 1, PrecommitType, 10, 1) + voteSet, _, valSet, vals := 
randVoteSet(h-1, 1, PrecommitType, 10, 1) commit, err := MakeCommit(lastID, h-1, 1, voteSet, vals, time.Now()) assert.NoError(t, err) @@ -473,14 +473,15 @@ func TestCommitToVoteSetWithVotesForNilBlock(t *testing.T) { } for _, tc := range testCases { - voteSet, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1) + voteSet, _, valSet, vals := randVoteSet(height-1, round, PrecommitType, tc.numValidators, 1) vi := 0 for n := range tc.blockIDs { for i := 0; i < tc.numVotes[n]; i++ { - addr := vals[vi].GetPubKey().Address() + pubKey, err := vals[vi].GetPubKey() + require.NoError(t, err) vote := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: vi, Height: height - 1, Round: round, @@ -513,20 +514,20 @@ func TestSignedHeaderValidateBasic(t *testing.T) { chainID := "𠜎" timestamp := time.Date(math.MaxInt64, 0, 0, 0, 0, 0, math.MaxInt64, time.UTC) h := Header{ - Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, - ChainID: chainID, - Height: commit.Height, - Time: timestamp, - LastBlockID: commit.BlockID, - LastCommitHash: commit.Hash(), - DataHash: commit.Hash(), - ValidatorsHash: commit.Hash(), - NextValidatorsHash: commit.Hash(), - ConsensusHash: commit.Hash(), - AppHash: commit.Hash(), - LastResultsHash: commit.Hash(), - EvidenceHash: commit.Hash(), - ProposerAddress: crypto.AddressHash([]byte("proposer_address")), + Version: version.Consensus{Block: math.MaxInt64, App: math.MaxInt64}, + ChainID: chainID, + Height: commit.Height, + Time: timestamp, + LastBlockID: commit.BlockID, + LastCommitHash: commit.Hash(), + DataHash: commit.Hash(), + VotersHash: commit.Hash(), + NextVotersHash: commit.Hash(), + ConsensusHash: commit.Hash(), + AppHash: commit.Hash(), + LastResultsHash: commit.Hash(), + EvidenceHash: commit.Hash(), + ProposerAddress: crypto.AddressHash([]byte("proposer_address")), } validSignedHeader := SignedHeader{Header: &h, Commit: commit} diff --git a/types/codec.go 
b/types/codec.go index d77f2b29d..b4989d267 100644 --- a/types/codec.go +++ b/types/codec.go @@ -2,6 +2,7 @@ package types import ( amino "github.com/tendermint/go-amino" + cryptoamino "github.com/tendermint/tendermint/crypto/encoding/amino" ) diff --git a/types/events.go b/types/events.go index fb80db0f0..c257ba328 100644 --- a/types/events.go +++ b/types/events.go @@ -4,6 +4,7 @@ import ( "fmt" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" tmpubsub "github.com/tendermint/tendermint/libs/pubsub" tmquery "github.com/tendermint/tendermint/libs/pubsub/query" diff --git a/types/evidence.go b/types/evidence.go index 199a01c70..244244f9e 100644 --- a/types/evidence.go +++ b/types/evidence.go @@ -7,6 +7,7 @@ import ( "time" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/tmhash" amino "github.com/tendermint/go-amino" @@ -59,7 +60,7 @@ type Evidence interface { Height() int64 // height of the equivocation Time() time.Time // time of the equivocation Address() []byte // address of the equivocating validator - Bytes() []byte // bytes which compromise the evidence + Bytes() []byte // bytes which comprise the evidence Hash() []byte // hash of the evidence Verify(chainID string, pubKey crypto.PubKey) error // verify the evidence Equal(Evidence) bool // check equality of evidence diff --git a/types/evidence_test.go b/types/evidence_test.go index bb04d9a4b..40e096fcd 100644 --- a/types/evidence_test.go +++ b/types/evidence_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/secp256k1" "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -17,17 +18,20 @@ type voteData struct { valid bool } -func makeVote(val PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID) *Vote { - addr := val.GetPubKey().Address() +func makeVote( + t *testing.T, val 
PrivValidator, chainID string, valIndex int, height int64, round, step int, blockID BlockID, +) *Vote { + pubKey, err := val.GetPubKey() + require.NoError(t, err) v := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: valIndex, Height: height, Round: round, Type: SignedMsgType(step), BlockID: blockID, } - err := val.SignVote(chainID, v) + err = val.SignVote(chainID, v) if err != nil { panic(err) } @@ -45,28 +49,27 @@ func TestEvidence(t *testing.T) { const chainID = "mychain" - vote1 := makeVote(val, chainID, 0, 10, 2, 1, blockID) - badVote := makeVote(val, chainID, 0, 10, 2, 1, blockID) + vote1 := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) + badVote := makeVote(t, val, chainID, 0, 10, 2, 1, blockID) err := val2.SignVote(chainID, badVote) - if err != nil { - panic(err) - } + assert.NoError(t, err) cases := []voteData{ - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID3), true}, - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID4), true}, - {vote1, makeVote(val, chainID, 0, 10, 2, 1, blockID), false}, // wrong block id - {vote1, makeVote(val, "mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id - {vote1, makeVote(val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index - {vote1, makeVote(val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height - {vote1, makeVote(val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round - {vote1, makeVote(val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step - {vote1, makeVote(val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), true}, // different block ids + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID3), true}, + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID4), true}, + {vote1, makeVote(t, val, chainID, 0, 10, 2, 1, blockID), false}, // wrong block id + {vote1, makeVote(t, val, 
"mychain2", 0, 10, 2, 1, blockID2), false}, // wrong chain id + {vote1, makeVote(t, val, chainID, 1, 10, 2, 1, blockID2), false}, // wrong val index + {vote1, makeVote(t, val, chainID, 0, 11, 2, 1, blockID2), false}, // wrong height + {vote1, makeVote(t, val, chainID, 0, 10, 3, 1, blockID2), false}, // wrong round + {vote1, makeVote(t, val, chainID, 0, 10, 2, 2, blockID2), false}, // wrong step + {vote1, makeVote(t, val2, chainID, 0, 10, 2, 1, blockID), false}, // wrong validator {vote1, badVote, false}, // signed by wrong key } - pubKey := val.GetPubKey() + pubKey, err := val.GetPubKey() + require.NoError(t, err) for _, c := range cases { ev := &DuplicateVoteEvidence{ VoteA: c.vote1, @@ -81,14 +84,14 @@ func TestEvidence(t *testing.T) { } func TestDuplicatedVoteEvidence(t *testing.T) { - ev := randomDuplicatedVoteEvidence() + ev := randomDuplicatedVoteEvidence(t) assert.True(t, ev.Equal(ev)) assert.False(t, ev.Equal(&DuplicateVoteEvidence{})) } func TestEvidenceList(t *testing.T) { - ev := randomDuplicatedVoteEvidence() + ev := randomDuplicatedVoteEvidence(t) evl := EvidenceList([]Evidence{ev}) assert.NotNil(t, evl.Hash()) @@ -103,8 +106,8 @@ func TestMaxEvidenceBytes(t *testing.T) { const chainID = "mychain" ev := &DuplicateVoteEvidence{ PubKey: secp256k1.GenPrivKey().PubKey(), // use secp because it's pubkey is longer - VoteA: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), - VoteB: makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), + VoteA: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID), + VoteB: makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, math.MaxInt64, blockID2), } bz, err := cdc.MarshalBinaryLengthPrefixed(ev) @@ -113,14 +116,14 @@ func TestMaxEvidenceBytes(t *testing.T) { assert.EqualValues(t, MaxEvidenceBytes, len(bz)) } -func randomDuplicatedVoteEvidence() *DuplicateVoteEvidence { +func 
randomDuplicatedVoteEvidence(t *testing.T) *DuplicateVoteEvidence { val := NewMockPV() blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" return &DuplicateVoteEvidence{ - VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), } } @@ -143,7 +146,7 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { ev.VoteB = nil }, true}, {"Invalid vote type", func(ev *DuplicateVoteEvidence) { - ev.VoteA = makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) + ev.VoteA = makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0, blockID2) }, true}, {"Invalid vote order", func(ev *DuplicateVoteEvidence) { swap := ev.VoteA.Copy() @@ -155,8 +158,8 @@ func TestDuplicateVoteEvidenceValidation(t *testing.T) { tc := tc t.Run(tc.testName, func(t *testing.T) { pk := secp256k1.GenPrivKey().PubKey() - vote1 := makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID) - vote2 := makeVote(val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2) + vote1 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID) + vote2 := makeVote(t, val, chainID, math.MaxInt64, math.MaxInt64, math.MaxInt64, 0x02, blockID2) ev := NewDuplicateVoteEvidence(pk, vote1, vote2) tc.malleateEvidence(ev) assert.Equal(t, tc.expectErr, ev.ValidateBasic() != nil, "Validate Basic had an unexpected result") diff --git a/types/genesis.go b/types/genesis.go index 7ca328c9a..24ec17c48 100644 --- a/types/genesis.go +++ b/types/genesis.go @@ -34,12 +34,22 @@ type GenesisValidator struct { Name string `json:"name"` } +type VoterParams struct { + VoterElectionThreshold int `json:"voter_election_threshold"` + 
MaxTolerableByzantinePercentage int `json:"max_tolerable_byzantine_percentage"` + + // As a unit of precision, if it is 1, it is 0.9, and if it is 2, it is 0.99. + // The default is 5, with a precision of 0.99999. + ElectionPrecision int `json:"election_precision"` +} + // GenesisDoc defines the initial conditions for a tendermint blockchain, in particular its validator set. type GenesisDoc struct { GenesisTime time.Time `json:"genesis_time"` ChainID string `json:"chain_id"` ConsensusParams *ConsensusParams `json:"consensus_params,omitempty"` Validators []GenesisValidator `json:"validators,omitempty"` + VoterParams *VoterParams `json:"voter_params,omitempty"` AppHash tmbytes.HexBytes `json:"app_hash"` AppState json.RawMessage `json:"app_state,omitempty"` } @@ -79,6 +89,12 @@ func (genDoc *GenesisDoc) ValidateAndComplete() error { return err } + if genDoc.VoterParams == nil { + genDoc.VoterParams = DefaultVoterParams() + } else if err := genDoc.VoterParams.Validate(); err != nil { + return err + } + for i, v := range genDoc.Validators { if v.Power == 0 { return errors.Errorf("the genesis file cannot contain validators with no voting power: %v", v) diff --git a/types/genesis_test.go b/types/genesis_test.go index ee713a6e7..16961ea7b 100644 --- a/types/genesis_test.go +++ b/types/genesis_test.go @@ -47,6 +47,15 @@ func TestGenesisBad(t *testing.T) { `},"power":"10","name":""}` + `]}`, ), + // missing some params in voter_params + []byte( + `{"chain_id":"mychain", "validators":[` + + `{"pub_key":{` + + `"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="` + + `},"power":"10","name":""}], ` + + `"voter_params":{"voter_election_threshold":"1"}` + + `}`, + ), } for _, testCase := range testCases { @@ -62,7 +71,7 @@ func TestGenesisGood(t *testing.T) { `{"pub_key":{` + `"type":"tendermint/PubKeyEd25519","value":"AT/+aaL1eB0477Mud9JMm8Sh8BIvOYlPGC9KkIUmFaE="` + `},"power":"10","name":""}` + - 
`],"app_hash":"","app_state":{"account_owner": "Bob"}}`, + `],"voter_params":null, "app_hash":"","app_state":{"account_owner": "Bob"}}`, ) _, err := GenesisDocFromJSON(genDocBytes) assert.NoError(t, err, "expected no error for good genDoc json") diff --git a/types/params.go b/types/params.go index 538bbbd6d..1ac1aa34e 100644 --- a/types/params.go +++ b/types/params.go @@ -19,6 +19,10 @@ const ( // MaxBlockPartsCount is the maximum number of block parts. MaxBlockPartsCount = (MaxBlockSizeBytes / BlockPartSizeBytes) + 1 + + DefaultVoterElectionThreshold = 33 + DefaultMaxTolerableByzantinePercentage = 20 + DefaultElectionPrecision = 5 // 5 is 0.99999 ) // ConsensusParams contains consensus critical parameters that determine the @@ -68,6 +72,29 @@ func DefaultConsensusParams() *ConsensusParams { } } +// DefaultVoterParams returns a default VoterParams. +func DefaultVoterParams() *VoterParams { + return &VoterParams{ + VoterElectionThreshold: DefaultVoterElectionThreshold, + MaxTolerableByzantinePercentage: DefaultMaxTolerableByzantinePercentage, + ElectionPrecision: DefaultElectionPrecision} +} + +func (params *VoterParams) Validate() error { + if params.VoterElectionThreshold < 0 { + return errors.Errorf("VoterElectionThreshold must be greater than or equal to 0. Got %d", + params.VoterElectionThreshold) + } + if params.MaxTolerableByzantinePercentage <= 0 || params.MaxTolerableByzantinePercentage >= 34 { + return errors.Errorf("MaxTolerableByzantinePercentage must be in between 1 and 33. Got %d", + params.MaxTolerableByzantinePercentage) + } + if params.ElectionPrecision <= 1 || params.ElectionPrecision > 15 { + return errors.Errorf("ElectionPrecision must be in 2~15(including). Got %d", params.ElectionPrecision) + } + return nil +} + // DefaultBlockParams returns a default BlockParams. 
func DefaultBlockParams() BlockParams { return BlockParams{ diff --git a/types/params_test.go b/types/params_test.go index 719d51da0..9fb11fa6a 100644 --- a/types/params_test.go +++ b/types/params_test.go @@ -7,6 +7,7 @@ import ( "time" "github.com/stretchr/testify/assert" + abci "github.com/tendermint/tendermint/abci/types" ) @@ -131,3 +132,61 @@ func TestConsensusParamsUpdate(t *testing.T) { assert.Equal(t, tc.updatedParams, tc.params.Update(tc.updates)) } } + +func TestVoterParamsValidate(t *testing.T) { + errorCases := []VoterParams{ + { + VoterElectionThreshold: -1, + MaxTolerableByzantinePercentage: 1, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 0, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 34, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 33, + ElectionPrecision: 1, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 33, + ElectionPrecision: 17, + }, + } + normalCases := []VoterParams{ + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 1, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 99999999, + MaxTolerableByzantinePercentage: 1, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 33, + ElectionPrecision: 2, + }, + { + VoterElectionThreshold: 0, + MaxTolerableByzantinePercentage: 1, + ElectionPrecision: 15, + }, + } + for _, tc := range errorCases { + assert.Error(t, tc.Validate()) + } + for _, tc := range normalCases { + assert.NoError(t, tc.Validate()) + } +} diff --git a/types/priv_validator.go b/types/priv_validator.go index d1593a744..71ac31cb8 100644 --- a/types/priv_validator.go +++ b/types/priv_validator.go @@ -13,8 +13,7 @@ import ( // PrivValidator defines the functionality of a local Tendermint validator // that signs votes and proposals, and never double signs. 
type PrivValidator interface { - // TODO: Extend the interface to return errors too. Issue: https://github.com/tendermint/tendermint/issues/3602 - GetPubKey() crypto.PubKey + GetPubKey() (crypto.PubKey, error) SignVote(chainID string, vote *Vote) error SignProposal(chainID string, proposal *Proposal) error @@ -32,7 +31,16 @@ func (pvs PrivValidatorsByAddress) Len() int { } func (pvs PrivValidatorsByAddress) Less(i, j int) bool { - return bytes.Compare(pvs[i].GetPubKey().Address(), pvs[j].GetPubKey().Address()) == -1 + pvi, err := pvs[i].GetPubKey() + if err != nil { + panic(err) + } + pvj, err := pvs[j].GetPubKey() + if err != nil { + panic(err) + } + + return bytes.Compare(pvi.Address(), pvj.Address()) == -1 } func (pvs PrivValidatorsByAddress) Swap(i, j int) { @@ -64,8 +72,8 @@ func NewMockPVWithParams(privKey crypto.PrivKey, breakProposalSigning, breakVote } // Implements PrivValidator. -func (pv MockPV) GetPubKey() crypto.PubKey { - return pv.PrivKey.PubKey() +func (pv MockPV) GetPubKey() (crypto.PubKey, error) { + return pv.PrivKey.PubKey(), nil } // Implements PrivValidator. @@ -109,8 +117,8 @@ func (pv MockPV) GenerateVRFProof(message []byte) (vrf.Proof, error) { // String returns a string representation of the MockPV. func (pv MockPV) String() string { - addr := pv.GetPubKey().Address() - return fmt.Sprintf("MockPV{%v}", addr) + mpv, _ := pv.GetPubKey() // mockPV will never return an error, ignored here + return fmt.Sprintf("MockPV{%v}", mpv.Address()) } // XXX: Implement. 
diff --git a/types/proposal_test.go b/types/proposal_test.go index 1b30a7286..e4ea19183 100644 --- a/types/proposal_test.go +++ b/types/proposal_test.go @@ -7,6 +7,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto/tmhash" ) @@ -45,7 +46,8 @@ func TestProposalString(t *testing.T) { func TestProposalVerifySignature(t *testing.T) { privVal := NewMockPV() - pubKey := privVal.GetPubKey() + pubKey, err := privVal.GetPubKey() + require.NoError(t, err) prop := NewProposal( 4, 2, 2, @@ -53,7 +55,7 @@ func TestProposalVerifySignature(t *testing.T) { signBytes := prop.SignBytes("test_chain_id") // sign it - err := privVal.SignProposal("test_chain_id", prop) + err = privVal.SignProposal("test_chain_id", prop) require.NoError(t, err) // verify the same proposal @@ -93,8 +95,9 @@ func BenchmarkProposalSign(b *testing.B) { func BenchmarkProposalVerifySignature(b *testing.B) { privVal := NewMockPV() err := privVal.SignProposal("test_chain_id", testProposal) - require.Nil(b, err) - pubKey := privVal.GetPubKey() + require.NoError(b, err) + pubKey, err := privVal.GetPubKey() + require.NoError(b, err) for i := 0; i < b.N; i++ { pubKey.VerifyBytes(testProposal.SignBytes("test_chain_id"), testProposal.Signature) diff --git a/types/proto3/block.pb.go b/types/proto3/block.pb.go index af3d5faf5..58da0ce9f 100644 --- a/types/proto3/block.pb.go +++ b/types/proto3/block.pb.go @@ -124,11 +124,11 @@ type Header struct { LastCommitHash []byte `protobuf:"bytes,6,opt,name=LastCommitHash,proto3" json:"LastCommitHash,omitempty"` DataHash []byte `protobuf:"bytes,7,opt,name=DataHash,proto3" json:"DataHash,omitempty"` // hashes from the app output from the prev block - ValidatorsHash []byte `protobuf:"bytes,8,opt,name=ValidatorsHash,proto3" json:"ValidatorsHash,omitempty"` - NextValidatorsHash []byte `protobuf:"bytes,9,opt,name=NextValidatorsHash,proto3" json:"NextValidatorsHash,omitempty"` - ConsensusHash []byte 
`protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` - AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` - LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` + VotersHash []byte `protobuf:"bytes,8,opt,name=VotersHash,proto3" json:"VotersHash,omitempty"` + NextVotersHash []byte `protobuf:"bytes,9,opt,name=NextVotersHash,proto3" json:"NextVotersHash,omitempty"` + ConsensusHash []byte `protobuf:"bytes,10,opt,name=ConsensusHash,proto3" json:"ConsensusHash,omitempty"` + AppHash []byte `protobuf:"bytes,11,opt,name=AppHash,proto3" json:"AppHash,omitempty"` + LastResultsHash []byte `protobuf:"bytes,12,opt,name=LastResultsHash,proto3" json:"LastResultsHash,omitempty"` // consensus info EvidenceHash []byte `protobuf:"bytes,13,opt,name=EvidenceHash,proto3" json:"EvidenceHash,omitempty"` ProposerAddress []byte `protobuf:"bytes,14,opt,name=ProposerAddress,proto3" json:"ProposerAddress,omitempty"` @@ -210,16 +210,16 @@ func (m *Header) GetDataHash() []byte { return nil } -func (m *Header) GetValidatorsHash() []byte { +func (m *Header) GetVotersHash() []byte { if m != nil { - return m.ValidatorsHash + return m.VotersHash } return nil } -func (m *Header) GetNextValidatorsHash() []byte { +func (m *Header) GetNextVotersHash() []byte { if m != nil { - return m.NextValidatorsHash + return m.NextVotersHash } return nil } @@ -367,35 +367,34 @@ func init() { func init() { proto.RegisterFile("types/proto3/block.proto", fileDescriptor_760f4d5ceb2a11f0) } var fileDescriptor_760f4d5ceb2a11f0 = []byte{ - // 468 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x93, 0xdf, 0x8b, 0x13, 0x31, - 0x10, 0xc7, 0x59, 0xbb, 0x6d, 0xaf, 0xb3, 0xed, 0x29, 0x83, 0xe8, 0xe2, 0x53, 0x59, 0xe4, 0xe8, - 0x8b, 0x5b, 0xbc, 0x03, 0x41, 0x7d, 0xea, 0x0f, 0xa1, 0x07, 0x22, 0x47, 0x3c, 0xee, 0xc1, 0xb7, - 0xb4, 0x1b, 0xda, 0x60, 0x37, 
0x59, 0x92, 0x54, 0xf4, 0x1f, 0xf4, 0xef, 0x92, 0x4c, 0xb6, 0xbd, - 0x6e, 0xb1, 0xdc, 0x53, 0xf3, 0x9d, 0xf9, 0xcc, 0x37, 0xb3, 0x93, 0x29, 0xa4, 0xee, 0x4f, 0x25, - 0xec, 0xb8, 0x32, 0xda, 0xe9, 0x9b, 0xf1, 0x72, 0xab, 0x57, 0x3f, 0x73, 0x12, 0xf8, 0xda, 0x09, - 0x55, 0x08, 0x53, 0x4a, 0xe5, 0x72, 0x82, 0x42, 0xfc, 0x26, 0xfb, 0x08, 0x83, 0x3b, 0x6e, 0xdc, - 0x77, 0xe1, 0x16, 0x82, 0x17, 0xc2, 0xe0, 0x4b, 0x68, 0xdf, 0x6b, 0xc7, 0xb7, 0x69, 0x34, 0x8c, - 0x46, 0x6d, 0x16, 0x04, 0x22, 0xc4, 0x0b, 0x6e, 0x37, 0xe9, 0xb3, 0x61, 0x34, 0xea, 0x33, 0x3a, - 0x67, 0x6b, 0xe8, 0x4e, 0xfd, 0x15, 0xb7, 0xf3, 0x43, 0x3a, 0x7a, 0x4c, 0xe3, 0x02, 0x12, 0xef, - 0x6c, 0x83, 0x2f, 0x55, 0x26, 0xd7, 0x57, 0xf9, 0x99, 0x46, 0xf2, 0x46, 0x17, 0xec, 0xb8, 0x34, - 0xfb, 0x1b, 0x43, 0xa7, 0xee, 0xee, 0x13, 0x74, 0x1f, 0x84, 0xb1, 0x52, 0x2b, 0xba, 0x2b, 0xb9, - 0x1e, 0x9e, 0x35, 0xac, 0x39, 0xb6, 0x2f, 0xc0, 0x14, 0xba, 0xb3, 0x0d, 0x97, 0xea, 0x76, 0x4e, - 0xcd, 0xf4, 0xd8, 0x5e, 0xe2, 0x2b, 0xef, 0x2f, 0xd7, 0x1b, 0x97, 0xb6, 0x86, 0xd1, 0xa8, 0xc5, - 0x6a, 0x85, 0x1f, 0x20, 0xbe, 0x97, 0xa5, 0x48, 0x63, 0xba, 0x2a, 0x3b, 0x7b, 0x95, 0x87, 0xac, - 0xe3, 0x65, 0xc5, 0x88, 0xc7, 0x29, 0x24, 0x5f, 0xb9, 0x75, 0xf5, 0x74, 0xd2, 0xf6, 0x13, 0x9d, - 0xd6, 0x1c, 0x3b, 0x2e, 0xc2, 0x2b, 0xb8, 0xf4, 0x72, 0xa6, 0xcb, 0x52, 0x3a, 0x1a, 0x6e, 0x87, - 0x86, 0x7b, 0x12, 0xc5, 0x37, 0x70, 0x31, 0xe7, 0x8e, 0x13, 0xd1, 0x25, 0xe2, 0xa0, 0xbd, 0xc7, - 0x03, 0xdf, 0xca, 0x82, 0x3b, 0x6d, 0x2c, 0x11, 0x17, 0xc1, 0xa3, 0x19, 0xc5, 0x1c, 0xf0, 0x9b, - 0xf8, 0xed, 0x4e, 0xd8, 0x1e, 0xb1, 0xff, 0xc9, 0xe0, 0x5b, 0x18, 0xcc, 0xb4, 0xb2, 0x42, 0xd9, - 0x5d, 0x40, 0x81, 0xd0, 0x66, 0xd0, 0xcf, 0x7b, 0x52, 0x55, 0x94, 0x4f, 0x28, 0xbf, 0x97, 0x38, - 0x82, 0xe7, 0xfe, 0x2b, 0x98, 0xb0, 0xbb, 0xad, 0x0b, 0x0e, 0x7d, 0x22, 0x4e, 0xc3, 0x98, 0x41, - 0xff, 0xcb, 0x2f, 0x59, 0x08, 0xb5, 0x12, 0x84, 0x0d, 0x08, 0x6b, 0xc4, 0xbc, 0xdb, 0x9d, 0xd1, - 0x95, 0xb6, 0xc2, 0x4c, 0x8a, 0xc2, 0x08, 0x6b, 0xd3, 0xcb, 0xe0, 
0x76, 0x12, 0xce, 0xde, 0x1f, - 0xb6, 0xc7, 0xaf, 0x39, 0x4d, 0x9a, 0xd6, 0x28, 0x66, 0x41, 0xe0, 0x0b, 0x68, 0x4d, 0xaa, 0x8a, - 0xd6, 0x23, 0x66, 0xfe, 0x98, 0x7d, 0x86, 0xde, 0xe1, 0x75, 0xfd, 0x17, 0x59, 0xb1, 0xd2, 0xaa, - 0xb0, 0x54, 0xd6, 0x62, 0x7b, 0xe9, 0xed, 0x14, 0x57, 0xda, 0x52, 0x69, 0x9b, 0x05, 0x31, 0x1d, - 0xff, 0x78, 0xb7, 0x96, 0x6e, 0xb3, 0x5b, 0xe6, 0x2b, 0x5d, 0x8e, 0x1f, 0x9f, 0xbf, 0x71, 0x3c, - 0xfa, 0xcb, 0x2e, 0x3b, 0xe1, 0xf7, 0x5f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x7d, 0x0b, 0x4e, 0x15, - 0xc9, 0x03, 0x00, 0x00, + // 464 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x84, 0x53, 0xdf, 0x8b, 0xd3, 0x40, + 0x10, 0x26, 0x36, 0x6d, 0xaf, 0x93, 0xf6, 0x94, 0x45, 0x34, 0xf8, 0x20, 0x25, 0xc8, 0xd1, 0x17, + 0x53, 0xbc, 0x03, 0x41, 0x7d, 0xea, 0x0f, 0xa1, 0x07, 0x22, 0xc7, 0x7a, 0xdc, 0x83, 0x6f, 0xdb, + 0x66, 0x68, 0x83, 0xcd, 0x6e, 0xd8, 0xdd, 0x8a, 0xfe, 0x63, 0xfe, 0x7d, 0xb2, 0xb3, 0x69, 0x2e, + 0x29, 0x94, 0x7b, 0xea, 0x7c, 0xdf, 0x7c, 0xf3, 0xcd, 0x74, 0x76, 0x02, 0xb1, 0xfd, 0x5b, 0xa2, + 0x99, 0x96, 0x5a, 0x59, 0x75, 0x33, 0x5d, 0xef, 0xd5, 0xe6, 0x57, 0x4a, 0x80, 0xbd, 0xb6, 0x28, + 0x33, 0xd4, 0x45, 0x2e, 0x6d, 0x4a, 0x22, 0xcf, 0xdf, 0x24, 0x9f, 0x60, 0x74, 0x27, 0xb4, 0xfd, + 0x81, 0x76, 0x85, 0x22, 0x43, 0xcd, 0x5e, 0x42, 0xf7, 0x5e, 0x59, 0xb1, 0x8f, 0x83, 0x71, 0x30, + 0xe9, 0x72, 0x0f, 0x18, 0x83, 0x70, 0x25, 0xcc, 0x2e, 0x7e, 0x36, 0x0e, 0x26, 0x43, 0x4e, 0x71, + 0xb2, 0x85, 0xfe, 0xdc, 0xb5, 0xb8, 0x5d, 0xd6, 0xe9, 0xe0, 0x31, 0xcd, 0x56, 0x10, 0x39, 0x67, + 0xe3, 0x7d, 0xa9, 0x32, 0xba, 0xbe, 0x4a, 0xcf, 0x0c, 0x92, 0xb6, 0xa6, 0xe0, 0xcd, 0xd2, 0xe4, + 0x5f, 0x08, 0xbd, 0x6a, 0xba, 0xcf, 0xd0, 0x7f, 0x40, 0x6d, 0x72, 0x25, 0xa9, 0x57, 0x74, 0x3d, + 0x3e, 0x6b, 0x58, 0xe9, 0xf8, 0xb1, 0x80, 0xc5, 0xd0, 0x5f, 0xec, 0x44, 0x2e, 0x6f, 0x97, 0x34, + 0xcc, 0x80, 0x1f, 0x21, 0x7b, 0xe5, 0xfc, 0xf3, 0xed, 0xce, 0xc6, 0x9d, 0x71, 0x30, 0xe9, 0xf0, + 0x0a, 0xb1, 0x8f, 0x10, 0xde, 
0xe7, 0x05, 0xc6, 0x21, 0xb5, 0x4a, 0xce, 0xb6, 0x72, 0x22, 0x63, + 0x45, 0x51, 0x72, 0xd2, 0xb3, 0x39, 0x44, 0xdf, 0x84, 0xb1, 0xd5, 0x76, 0xe2, 0xee, 0x13, 0x93, + 0x56, 0x3a, 0xde, 0x2c, 0x62, 0x57, 0x70, 0xe9, 0xe0, 0x42, 0x15, 0x45, 0x6e, 0x69, 0xb9, 0x3d, + 0x5a, 0xee, 0x09, 0xcb, 0xde, 0xc0, 0xc5, 0x52, 0x58, 0x41, 0x8a, 0x3e, 0x29, 0x6a, 0xcc, 0xde, + 0x02, 0x3c, 0x28, 0x8b, 0xda, 0x50, 0xf6, 0x82, 0xb2, 0x0d, 0xc6, 0xf5, 0xf8, 0x8e, 0x7f, 0x6c, + 0x43, 0x33, 0xf0, 0x3d, 0xda, 0x2c, 0x7b, 0x07, 0xa3, 0x85, 0x92, 0x06, 0xa5, 0x39, 0x78, 0x19, + 0x90, 0xac, 0x4d, 0xba, 0xfd, 0xce, 0xca, 0x92, 0xf2, 0x11, 0xe5, 0x8f, 0x90, 0x4d, 0xe0, 0xb9, + 0x9b, 0x9a, 0xa3, 0x39, 0xec, 0xad, 0x77, 0x18, 0x92, 0xe2, 0x94, 0x66, 0x09, 0x0c, 0xbf, 0xfe, + 0xce, 0x33, 0x94, 0x1b, 0x24, 0xd9, 0x88, 0x64, 0x2d, 0xce, 0xb9, 0xdd, 0x69, 0x55, 0x2a, 0x83, + 0x7a, 0x96, 0x65, 0x1a, 0x8d, 0x89, 0x2f, 0xbd, 0xdb, 0x09, 0x9d, 0x7c, 0xa8, 0xaf, 0xc5, 0x9d, + 0x35, 0x6d, 0x96, 0xce, 0x26, 0xe4, 0x1e, 0xb0, 0x17, 0xd0, 0x99, 0x95, 0x25, 0x9d, 0x43, 0xc8, + 0x5d, 0x98, 0x7c, 0x81, 0x41, 0xfd, 0x9a, 0xee, 0x1f, 0x19, 0xdc, 0x28, 0x99, 0x19, 0x2a, 0xeb, + 0xf0, 0x23, 0x74, 0x76, 0x52, 0x48, 0x65, 0xa8, 0xb4, 0xcb, 0x3d, 0x98, 0x4f, 0x7f, 0xbe, 0xdf, + 0xe6, 0x76, 0x77, 0x58, 0xa7, 0x1b, 0x55, 0x4c, 0x1f, 0x9f, 0xbb, 0x15, 0x36, 0x3e, 0xd1, 0x75, + 0xcf, 0xff, 0xfe, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xf7, 0xe5, 0x35, 0x61, 0xb9, 0x03, 0x00, 0x00, } diff --git a/types/proto3/block.proto b/types/proto3/block.proto index adaa0a00d..eb198b7ba 100644 --- a/types/proto3/block.proto +++ b/types/proto3/block.proto @@ -24,19 +24,19 @@ message Header { BlockID LastBlockID = 5; // hashes of block data - bytes LastCommitHash = 6; // commit from validators from the last block - bytes DataHash = 7; // transactions + bytes LastCommitHash = 6; // commit from validators from the last block + bytes DataHash = 7; // transactions // hashes from the app output from the prev block - bytes ValidatorsHash = 8; // 
validators for the current block - bytes NextValidatorsHash = 9; // validators for the next block - bytes ConsensusHash = 10; // consensus params for current block - bytes AppHash = 11; // state after txs from the previous block - bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block + bytes VotersHash = 8; // voters for the current block + bytes NextVotersHash = 9; // voters for the next block + bytes ConsensusHash = 10; // consensus params for current block + bytes AppHash = 11; // state after txs from the previous block + bytes LastResultsHash = 12; // root hash of all results from the txs from the previous block // consensus info - bytes EvidenceHash = 13; // evidence included in the block - bytes ProposerAddress = 14; // original proposer of the block + bytes EvidenceHash = 13; // evidence included in the block + bytes ProposerAddress = 14; // original proposer of the block } message Version { diff --git a/types/proto3_test.go b/types/proto3_test.go index f969be128..3c6f93d6c 100644 --- a/types/proto3_test.go +++ b/types/proto3_test.go @@ -4,7 +4,7 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" // nolint: staticcheck // still used by gogoproto "github.com/stretchr/testify/assert" "github.com/tendermint/tendermint/types/proto3" @@ -33,7 +33,7 @@ func TestProto3Compatibility(t *testing.T) { }, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } aminoHeader := Header{ ChainID: "cosmos", @@ -48,7 +48,7 @@ func TestProto3Compatibility(t *testing.T) { }, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } ab, err := cdc.MarshalBinaryBare(aminoHeader) assert.NoError(t, err, "unexpected error") @@ -64,7 +64,7 @@ func TestProto3Compatibility(t *testing.T) { Time: 
&proto3.Timestamp{Seconds: seconds, Nanos: nanos}, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } emptyLastBlockAm := Header{ ChainID: "cosmos", @@ -72,7 +72,7 @@ func TestProto3Compatibility(t *testing.T) { Time: tm, LastCommitHash: []byte("commit hash"), DataHash: []byte("data hash"), - ValidatorsHash: []byte("validators hash"), + VotersHash: []byte("voters hash"), } ab, err = cdc.MarshalBinaryBare(emptyLastBlockAm) diff --git a/types/protobuf.go b/types/protobuf.go index 52815593f..2cf53225b 100644 --- a/types/protobuf.go +++ b/types/protobuf.go @@ -57,8 +57,8 @@ func (tm2pb) Header(header *Header) abci.Header { LastCommitHash: header.LastCommitHash, DataHash: header.DataHash, - ValidatorsHash: header.ValidatorsHash, - NextValidatorsHash: header.NextValidatorsHash, + ValidatorsHash: header.VotersHash, + NextValidatorsHash: header.NextVotersHash, ConsensusHash: header.ConsensusHash, AppHash: header.AppHash, LastResultsHash: header.LastResultsHash, @@ -71,7 +71,7 @@ func (tm2pb) Header(header *Header) abci.Header { func (tm2pb) Validator(val *Validator) abci.Validator { return abci.Validator{ Address: val.PubKey.Address(), - Power: val.VotingPower, + Power: val.StakingPower, } } @@ -93,7 +93,7 @@ func (tm2pb) PartSetHeader(header PartSetHeader) abci.PartSetHeader { func (tm2pb) ValidatorUpdate(val *Validator) abci.ValidatorUpdate { return abci.ValidatorUpdate{ PubKey: TM2PB.PubKey(val.PubKey), - Power: val.VotingPower, + Power: val.StakingPower, } } @@ -149,8 +149,8 @@ func (tm2pb) ConsensusParams(params *ConsensusParams) *abci.ConsensusParams { // ABCI Evidence includes information from the past that's not included in the evidence itself // so Evidence types stays compact. 
// XXX: panics on nil or unknown pubkey type -func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci.Evidence { - _, val := valSet.GetByAddress(ev.Address()) +func (tm2pb) Evidence(ev Evidence, voterSet *VoterSet, evTime time.Time) abci.Evidence { + _, val := voterSet.GetByAddress(ev.Address()) if val == nil { // should already have checked this panic(val) @@ -173,7 +173,7 @@ func (tm2pb) Evidence(ev Evidence, valSet *ValidatorSet, evTime time.Time) abci. Validator: TM2PB.Validator(val), Height: ev.Height(), Time: evTime, - TotalVotingPower: valSet.TotalVotingPower(), + TotalVotingPower: voterSet.TotalVotingPower(), } } diff --git a/types/protobuf_test.go b/types/protobuf_test.go index b688716b5..ae6734af3 100644 --- a/types/protobuf_test.go +++ b/types/protobuf_test.go @@ -4,10 +4,12 @@ import ( "testing" "time" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" // nolint: staticcheck // still used by gogoproto "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" amino "github.com/tendermint/go-amino" + abci "github.com/tendermint/tendermint/abci/types" "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" @@ -131,15 +133,16 @@ func TestABCIEvidence(t *testing.T) { blockID := makeBlockID([]byte("blockhash"), 1000, []byte("partshash")) blockID2 := makeBlockID([]byte("blockhash2"), 1000, []byte("partshash")) const chainID = "mychain" - pubKey := val.GetPubKey() + pubKey, err := val.GetPubKey() + require.NoError(t, err) ev := &DuplicateVoteEvidence{ PubKey: pubKey, - VoteA: makeVote(val, chainID, 0, 10, 2, 1, blockID), - VoteB: makeVote(val, chainID, 0, 10, 2, 1, blockID2), + VoteA: makeVote(t, val, chainID, 0, 10, 2, 1, blockID), + VoteB: makeVote(t, val, chainID, 0, 10, 2, 1, blockID2), } abciEv := TM2PB.Evidence( ev, - NewValidatorSet([]*Validator{NewValidator(pubKey, 10)}), + ToVoterAll([]*Validator{NewValidator(pubKey, 10)}), 
time.Now(), ) diff --git a/types/results_test.go b/types/results_test.go index a37de9ec4..9ecfe35ca 100644 --- a/types/results_test.go +++ b/types/results_test.go @@ -5,6 +5,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + abci "github.com/tendermint/tendermint/abci/types" ) diff --git a/types/test_util.go b/types/test_util.go index 48913f308..377c965a8 100644 --- a/types/test_util.go +++ b/types/test_util.go @@ -2,6 +2,8 @@ package types import ( "time" + + "github.com/pkg/errors" ) func MakeCommit(blockID BlockID, height int64, round int, @@ -9,9 +11,12 @@ func MakeCommit(blockID BlockID, height int64, round int, // all sign for i := 0; i < len(validators); i++ { - addr := validators[i].GetPubKey().Address() + pubKey, err := validators[i].GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } vote := &Vote{ - ValidatorAddress: addr, + ValidatorAddress: pubKey.Address(), ValidatorIndex: i, Height: height, Round: round, @@ -20,7 +25,7 @@ func MakeCommit(blockID BlockID, height int64, round int, Timestamp: now, } - _, err := signAddVote(validators[i], vote, voteSet) + _, err = signAddVote(validators[i], vote, voteSet) if err != nil { return nil, err } @@ -45,7 +50,11 @@ func MakeVote( chainID string, now time.Time, ) (*Vote, error) { - addr := privVal.GetPubKey().Address() + pubKey, err := privVal.GetPubKey() + if err != nil { + return nil, errors.Wrap(err, "can't get pubkey") + } + addr := pubKey.Address() idx, _ := valSet.GetByAddress(addr) vote := &Vote{ ValidatorAddress: addr, diff --git a/types/validator.go b/types/validator.go index c3cadc4d3..ab34c318c 100644 --- a/types/validator.go +++ b/types/validator.go @@ -10,21 +10,27 @@ import ( ) // Volatile state for each Validator -// NOTE: The ProposerPriority is not included in Validator.Hash(); +// NOTE: The ProposerPriority, VotingPower is not included in Validator.Hash(); // make sure to update that method if changes are made 
here +// StakingPower is the potential voting power proportional to the amount of stake, +// and VotingPower is the actual voting power granted by the election process. +// StakingPower is durable and can be changed by staking txs. +// VotingPower is volatile and can be changed at every height. type Validator struct { - Address Address `json:"address"` - PubKey crypto.PubKey `json:"pub_key"` - VotingPower int64 `json:"voting_power"` + Address Address `json:"address"` + PubKey crypto.PubKey `json:"pub_key"` + StakingPower int64 `json:"staking_power"` + VotingPower int64 `json:"voting_power"` ProposerPriority int64 `json:"proposer_priority"` } -func NewValidator(pubKey crypto.PubKey, votingPower int64) *Validator { +func NewValidator(pubKey crypto.PubKey, stakingPower int64) *Validator { return &Validator{ Address: pubKey.Address(), PubKey: pubKey, - VotingPower: votingPower, + StakingPower: stakingPower, + VotingPower: 0, ProposerPriority: 0, } } @@ -66,7 +72,7 @@ func (v *Validator) String() string { return fmt.Sprintf("Validator{%v %v VP:%v A:%v}", v.Address, v.PubKey, - v.VotingPower, + v.StakingPower, v.ProposerPriority) } @@ -74,7 +80,7 @@ func (v *Validator) String() string { func ValidatorListString(vals []*Validator) string { chunks := make([]string, len(vals)) for i, val := range vals { - chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.VotingPower) + chunks[i] = fmt.Sprintf("%s:%d", val.Address, val.StakingPower) } return strings.Join(chunks, ",") @@ -86,11 +92,11 @@ func ValidatorListString(vals []*Validator) string { // which changes every round. 
func (v *Validator) Bytes() []byte { return cdcEncode(struct { - PubKey crypto.PubKey - VotingPower int64 + PubKey crypto.PubKey + StakingPower int64 }{ v.PubKey, - v.VotingPower, + v.StakingPower, }) } @@ -101,11 +107,14 @@ func (v *Validator) Bytes() []byte { // UNSTABLE func RandValidator(randPower bool, minPower int64) (*Validator, PrivValidator) { privVal := NewMockPV() - votePower := minPower + stakingPower := minPower if randPower { - votePower += int64(tmrand.Uint32()) + stakingPower += int64(tmrand.Uint32()) } - pubKey := privVal.GetPubKey() - val := NewValidator(pubKey, votePower) + pubKey, err := privVal.GetPubKey() + if err != nil { + panic(fmt.Errorf("could not retrieve pubkey %w", err)) + } + val := NewValidator(pubKey, stakingPower) return val, privVal } diff --git a/types/validator_set.go b/types/validator_set.go index 6d04424a6..4eb0093ea 100644 --- a/types/validator_set.go +++ b/types/validator_set.go @@ -2,7 +2,6 @@ package types import ( "bytes" - "encoding/binary" "fmt" "math" "math/big" @@ -10,28 +9,40 @@ import ( "strings" "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" - "github.com/tendermint/tendermint/crypto/tmhash" - tmmath "github.com/tendermint/tendermint/libs/math" tmrand "github.com/tendermint/tendermint/libs/rand" ) const ( - // MaxTotalVotingPower - the maximum allowed total voting power. + // MaxTotalStakingPower - the maximum allowed total voting power. // It needs to be sufficiently small to, in all cases: // 1. prevent clipping in incrementProposerPriority() // 2. let (diff+diffMax-1) not overflow in IncrementProposerPriority() // (Proof of 1 is tricky, left to the reader). // It could be higher, but this is sufficiently large for our purposes, // and leaves room for defensive purposes. 
- MaxTotalVotingPower = int64(math.MaxInt64) / 8 + MaxTotalStakingPower = int64(math.MaxInt64) / 8 + + // MaxTotalVotingPower should be same to MaxTotalStakingPower theoretically, + // but the value can be higher when it is type-casted as float64 + // because of the number of valid digits of float64. + // This phenomenon occurs in the following computations. + // + // `winner.SetWinPoint(int64(float64(totalPriority) * winPoints[i] / totalWinPoint))` lib/rand/sampling.go + // + // MaxTotalVotingPower can be as large as MaxTotalStakingPower+alpha + // but I don't know the exact alpha. 1000 seems to be enough by some examination. + // Please refer TestMaxVotingPowerTest for this. + // TODO: 1000 is temporary limit, we should remove float calculation and then we can fix this limit + MaxTotalVotingPower = MaxTotalStakingPower + 1000 // PriorityWindowSizeFactor - is a constant that when multiplied with the total voting power gives // the maximum allowed distance between validator priorities. PriorityWindowSizeFactor = 2 ) -// ValidatorSet represent a set of *Validator at a given height. +// VoterSet represent a set of *Validator at a given height. // The validators can be fetched by address or index. // The index is in order of .Address, so the indices are fixed // for all rounds of a given blockchain height - ie. the validators @@ -46,12 +57,12 @@ type ValidatorSet struct { Validators []*Validator `json:"validators"` // cached (unexported) - totalVotingPower int64 + totalStakingPower int64 } -// NewValidatorSet initializes a ValidatorSet by copying over the +// NewValidatorSet initializes a VoterSet by copying over the // values from `valz`, a list of Validators. If valz is nil or empty, -// the new ValidatorSet will have an empty list of Validators. +// the new VoterSet will have an empty list of Validators. // The addresses of validators in `valz` must be unique otherwise the // function panics. 
// Note the validator set size has an implied limit equal to that of the MaxVotesCount - @@ -78,7 +89,7 @@ func (vals *ValidatorSet) CopyIncrementProposerPriority(times int) *ValidatorSet return copy } -// TODO The current random selection by VRF uses VotingPower, so the processing on ProposerPriority can be removed, +// TODO The current random selection by VRF uses StakingPower, so the processing on ProposerPriority can be removed, // TODO but it remains for later verification of random selection based on ProposerPriority. // IncrementProposerPriority increments ProposerPriority of each validator and updates the // proposer. Panics if validator set is empty. @@ -93,8 +104,8 @@ func (vals *ValidatorSet) IncrementProposerPriority(times int) { // Cap the difference between priorities to be proportional to 2*totalPower by // re-normalizing priorities, i.e., rescale all priorities by multiplying with: - // 2*totalVotingPower/(maxPriority - minPriority) - diffMax := PriorityWindowSizeFactor * vals.TotalVotingPower() + // 2*totalStakingPower/(maxPriority - minPriority) + diffMax := PriorityWindowSizeFactor * vals.TotalStakingPower() vals.RescalePriorities(diffMax) vals.shiftByAvgProposerPriority() @@ -132,13 +143,13 @@ func (vals *ValidatorSet) RescalePriorities(diffMax int64) { func (vals *ValidatorSet) incrementProposerPriority() *Validator { for _, val := range vals.Validators { // Check for overflow for sum. - newPrio := safeAddClip(val.ProposerPriority, val.VotingPower) + newPrio := safeAddClip(val.ProposerPriority, val.StakingPower) val.ProposerPriority = newPrio } // Decrement the validator with most ProposerPriority. mostest := vals.getValWithMostPriority() // Mind the underflow. 
- mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalVotingPower()) + mostest.ProposerPriority = safeSubClip(mostest.ProposerPriority, vals.TotalStakingPower()) return mostest } @@ -211,11 +222,11 @@ func validatorListCopy(valsList []*Validator) []*Validator { return valsCopy } -// Copy each validator into a new ValidatorSet. +// Copy each validator into a new VoterSet. func (vals *ValidatorSet) Copy() *ValidatorSet { return &ValidatorSet{ - Validators: validatorListCopy(vals.Validators), - totalVotingPower: vals.totalVotingPower, + Validators: validatorListCopy(vals.Validators), + totalStakingPower: vals.totalStakingPower, } } @@ -242,7 +253,7 @@ func (vals *ValidatorSet) GetByAddress(address []byte) (index int, val *Validato // GetByIndex returns the validator's address and validator itself by index. // It returns nil values if index is less than 0 or greater or equal to -// len(ValidatorSet.Validators). +// len(VoterSet.Validators). func (vals *ValidatorSet) GetByIndex(index int) (address []byte, val *Validator) { if index < 0 || index >= len(vals.Validators) { return nil, nil @@ -256,32 +267,32 @@ func (vals *ValidatorSet) Size() int { return len(vals.Validators) } -// Forces recalculation of the set's total voting power. -// Panics if total voting power is bigger than MaxTotalVotingPower. -func (vals *ValidatorSet) updateTotalVotingPower() { +// Forces recalculation of the set's total staking power. +// Panics if total voting power is bigger than MaxTotalStakingPower. 
+func (vals *ValidatorSet) updateTotalStakingPower() { sum := int64(0) for _, val := range vals.Validators { // mind overflow - sum = safeAddClip(sum, val.VotingPower) - if sum > MaxTotalVotingPower { + sum = safeAddClip(sum, val.StakingPower) + if sum > MaxTotalStakingPower { panic(fmt.Sprintf( - "Total voting power should be guarded to not exceed %v; got: %v", - MaxTotalVotingPower, + "Total staking power should be guarded to not exceed %v; got: %v", + MaxTotalStakingPower, sum)) } } - vals.totalVotingPower = sum + vals.totalStakingPower = sum } -// TotalVotingPower returns the sum of the voting powers of all validators. +// TotalStakingPower returns the sum of the voting powers of all validators. // It recomputes the total voting power if required. -func (vals *ValidatorSet) TotalVotingPower() int64 { - if vals.totalVotingPower == 0 { - vals.updateTotalVotingPower() +func (vals *ValidatorSet) TotalStakingPower() int64 { + if vals.totalStakingPower == 0 { + vals.updateTotalStakingPower() } - return vals.totalVotingPower + return vals.totalStakingPower } // Hash returns the Merkle root hash build using validators (as leaves) in the @@ -332,14 +343,14 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e } switch { - case valUpdate.VotingPower < 0: - err = fmt.Errorf("voting power can't be negative: %d", valUpdate.VotingPower) + case valUpdate.StakingPower < 0: + err = fmt.Errorf("voting power can't be negative: %d", valUpdate.StakingPower) return nil, nil, err - case valUpdate.VotingPower > MaxTotalVotingPower: + case valUpdate.StakingPower > MaxTotalStakingPower: err = fmt.Errorf("to prevent clipping/overflow, voting power can't be higher than %d, got %d", - MaxTotalVotingPower, valUpdate.VotingPower) + MaxTotalStakingPower, valUpdate.StakingPower) return nil, nil, err - case valUpdate.VotingPower == 0: + case valUpdate.StakingPower == 0: removals = append(removals, valUpdate) default: updates = append(updates, valUpdate) @@ -362,7 
+373,7 @@ func processChanges(origChanges []*Validator) (updates, removals []*Validator, e // // Returns: // tvpAfterUpdatesBeforeRemovals - the new total voting power if these updates would be applied without the removals. -// Note that this will be < 2 * MaxTotalVotingPower in case high power validators are removed and +// Note that this will be < 2 * MaxTotalStakingPower in case high power validators are removed and // validators are added/ updated with high power values. // // err - non-nil if the maximum allowed total voting power would be exceeded @@ -374,9 +385,9 @@ func verifyUpdates( delta := func(update *Validator, vals *ValidatorSet) int64 { _, val := vals.GetByAddress(update.Address) if val != nil { - return update.VotingPower - val.VotingPower + return update.StakingPower - val.StakingPower } - return update.VotingPower + return update.StakingPower } updatesCopy := validatorListCopy(updates) @@ -384,13 +395,13 @@ func verifyUpdates( return delta(updatesCopy[i], vals) < delta(updatesCopy[j], vals) }) - tvpAfterRemovals := vals.TotalVotingPower() - removedPower + tvpAfterRemovals := vals.TotalStakingPower() - removedPower for _, upd := range updatesCopy { tvpAfterRemovals += delta(upd, vals) - if tvpAfterRemovals > MaxTotalVotingPower { + if tvpAfterRemovals > MaxTotalStakingPower { err = fmt.Errorf( "failed to add/update validator %v, total voting power would exceed the max allowed %v", - upd.Address, MaxTotalVotingPower) + upd.Address, MaxTotalStakingPower) return 0, err } } @@ -408,31 +419,31 @@ func numNewValidators(updates []*Validator, vals *ValidatorSet) int { } // computeNewPriorities computes the proposer priority for the validators not present in the set based on -// 'updatedTotalVotingPower'. +// 'updatedTotalStakingPower'. // Leaves unchanged the priorities of validators that are changed. // // 'updates' parameter must be a list of unique validators to be added or updated. 
// -// 'updatedTotalVotingPower' is the total voting power of a set where all updates would be applied but -// not the removals. It must be < 2*MaxTotalVotingPower and may be close to this limit if close to -// MaxTotalVotingPower will be removed. This is still safe from overflow since MaxTotalVotingPower is maxInt64/8. +// 'updatedTotalStakingPower' is the total voting power of a set where all updates would be applied but +// not the removals. It must be < 2*MaxTotalStakingPower and may be close to this limit if close to +// MaxTotalStakingPower will be removed. This is still safe from overflow since MaxTotalStakingPower is maxInt64/8. // // No changes are made to the validator set 'vals'. -func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalVotingPower int64) { +func computeNewPriorities(updates []*Validator, vals *ValidatorSet, updatedTotalStakingPower int64) { for _, valUpdate := range updates { address := valUpdate.Address _, val := vals.GetByAddress(address) if val == nil { // add val - // Set ProposerPriority to -C*totalVotingPower (with C ~= 1.125) to make sure validators can't + // Set ProposerPriority to -C*totalStakingPower (with C ~= 1.125) to make sure validators can't // un-bond and then re-bond to reset their (potentially previously negative) ProposerPriority to zero. // - // Contract: updatedVotingPower < 2 * MaxTotalVotingPower to ensure ProposerPriority does + // Contract: updatedStakingPower < 2 * MaxTotalStakingPower to ensure ProposerPriority does // not exceed the bounds of int64. // - // Compute ProposerPriority = -1.125*totalVotingPower == -(updatedVotingPower + (updatedVotingPower >> 3)). - valUpdate.ProposerPriority = -(updatedTotalVotingPower + (updatedTotalVotingPower >> 3)) + // Compute ProposerPriority = -1.125*totalStakingPower == -(updatedStakingPower + (updatedStakingPower >> 3)). 
+ valUpdate.ProposerPriority = -(updatedTotalStakingPower + (updatedTotalStakingPower >> 3)) } else { valUpdate.ProposerPriority = val.ProposerPriority } @@ -482,21 +493,21 @@ func (vals *ValidatorSet) applyUpdates(updates []*Validator) { // Checks that the validators to be removed are part of the validator set. // No changes are made to the validator set 'vals'. -func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (votingPower int64, err error) { +func verifyRemovals(deletes []*Validator, vals *ValidatorSet) (staingPower int64, err error) { - removedVotingPower := int64(0) + removedStakingPower := int64(0) for _, valUpdate := range deletes { address := valUpdate.Address _, val := vals.GetByAddress(address) if val == nil { - return removedVotingPower, fmt.Errorf("failed to find validator %X to remove", address) + return removedStakingPower, fmt.Errorf("failed to find validator %X to remove", address) } - removedVotingPower += val.VotingPower + removedStakingPower += val.StakingPower } if len(deletes) > len(vals.Validators) { panic("more deletes than validators") } - return removedVotingPower, nil + return removedStakingPower, nil } // Removes the validators specified in 'deletes' from validator set 'vals'. @@ -555,14 +566,14 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes // Verify that applying the 'deletes' against 'vals' will not result in error. // Get the voting power that is going to be removed. - removedVotingPower, err := verifyRemovals(deletes, vals) + removedStakingPower, err := verifyRemovals(deletes, vals) if err != nil { return err } // Verify that applying the 'updates' against 'vals' will not result in error. - // Get the updated total voting power before removal. Note that this is < 2 * MaxTotalVotingPower - tvpAfterUpdatesBeforeRemovals, err := verifyUpdates(updates, vals, removedVotingPower) + // Get the updated total voting power before removal. 
Note that this is < 2 * MaxTotalStakingPower + tvpAfterUpdatesBeforeRemovals, err := verifyUpdates(updates, vals, removedStakingPower) if err != nil { return err } @@ -574,10 +585,10 @@ func (vals *ValidatorSet) updateWithChangeSet(changes []*Validator, allowDeletes vals.applyUpdates(updates) vals.applyRemovals(deletes) - vals.updateTotalVotingPower() // will panic if total voting power > MaxTotalVotingPower + vals.updateTotalStakingPower() // will panic if total voting power > MaxTotalStakingPower // Scale and center. - vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalVotingPower()) + vals.RescalePriorities(PriorityWindowSizeFactor * vals.TotalStakingPower()) vals.shiftByAvgProposerPriority() return nil @@ -599,223 +610,20 @@ func (vals *ValidatorSet) UpdateWithChangeSet(changes []*Validator) error { return vals.updateWithChangeSet(changes, true) } -// VerifyCommit verifies +2/3 of the set had signed the given commit. -func (vals *ValidatorSet) VerifyCommit(chainID string, blockID BlockID, - height int64, commit *Commit) error { - - if vals.Size() != len(commit.Signatures) { - return NewErrInvalidCommitSignatures(vals.Size(), len(commit.Signatures)) - } - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err - } - - talliedVotingPower := int64(0) - votingPowerNeeded := vals.TotalVotingPower() * 2 / 3 - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // The vals and commit have a 1-to-1 correspondance. - // This means we don't need the validator address or to do any lookup. - val := vals.Validators[idx] - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - // Good! 
- if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - talliedVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } - - // return as soon as +2/3 of the signatures are verified - if talliedVotingPower > votingPowerNeeded { - return nil - } - } - - // talliedVotingPower <= needed, thus return error - return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} -} - -// VerifyFutureCommit will check to see if the set would be valid with a different -// validator set. -// -// vals is the old validator set that we know. Over 2/3 of the power in old -// signed this block. -// -// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 -// can't make arbitrary state transitions. You still need > 2/3 Byzantine to -// make arbitrary state transitions. -// -// To preserve this property in the light client, we also require > 2/3 of the -// old vals to sign the future commit at H, that way we preserve the property -// that if they weren't being truthful about the validator set at H (block hash -// -> vals hash) or about the app state (block hash -> app hash) we can slash -// > 2/3. Otherwise, the lite client isn't providing the same security -// guarantees. -// -// Even if we added a slashing condition that if you sign a block header with -// the wrong validator set, then we would only need > 1/3 of signatures from -// the old vals on the new commit, it wouldn't be sufficient because the new -// vals can be arbitrary and commit some arbitrary app hash. -// -// newSet is the validator set that signed this block. Only votes from new are -// sufficient for 2/3 majority in the new set as well, for it to be a valid -// commit. -// -// NOTE: This doesn't check whether the commit is a future commit, because the -// current height isn't part of the ValidatorSet. 
Caller must check that the -// commit height is greater than the height for this validator set. -func (vals *ValidatorSet) VerifyFutureCommit(newSet *ValidatorSet, chainID string, - blockID BlockID, height int64, commit *Commit) error { - oldVals := vals - - // Commit must be a valid commit for newSet. - err := newSet.VerifyCommit(chainID, blockID, height, commit) - if err != nil { - return err - } - - // Check old voting power. - oldVotingPower := int64(0) - seen := map[int]bool{} - - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // See if this validator is in oldVals. - oldIdx, val := oldVals.GetByAddress(commitSig.ValidatorAddress) - if val == nil || seen[oldIdx] { - continue // missing or double vote... - } - seen[oldIdx] = true - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - oldVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. - // } - } - - if got, needed := oldVotingPower, oldVals.TotalVotingPower()*2/3; got <= needed { - return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} - } - return nil -} - -// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator -// set signed this commit. -// NOTE the given validators do not necessarily correspond to the validator set -// for this commit, but there may be some intersection. 
-func (vals *ValidatorSet) VerifyCommitTrusting(chainID string, blockID BlockID, - height int64, commit *Commit, trustLevel tmmath.Fraction) error { - - if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3 - trustLevel.Numerator > trustLevel.Denominator { // > 1 - panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel)) - } - - if err := verifyCommitBasic(commit, height, blockID); err != nil { - return err +func (vals *ValidatorSet) SelectProposer(proofHash []byte, height int64, round int) *Validator { + if vals.IsNilOrEmpty() { + panic("empty validator set") } - - var ( - talliedVotingPower int64 - seenVals = make(map[int]int, len(commit.Signatures)) // validator index -> commit index - votingPowerNeeded = (vals.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator - ) - - for idx, commitSig := range commit.Signatures { - if commitSig.Absent() { - continue // OK, some signatures can be absent. - } - - // We don't know the validators that committed this block, so we have to - // check for each vote if its validator is already known. - valIdx, val := vals.GetByAddress(commitSig.ValidatorAddress) - - if firstIndex, ok := seenVals[valIdx]; ok { // double vote - secondIndex := idx - return errors.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) - } - - if val != nil { - seenVals[valIdx] = idx - - // Validate signature. - voteSignBytes := commit.VoteSignBytes(chainID, idx) - if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { - return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) - } - - // Good! - if blockID.Equals(commitSig.BlockID(commit.BlockID)) { - talliedVotingPower += val.VotingPower - } - // else { - // It's OK that the BlockID doesn't match. We include stray - // signatures (~votes for nil) to measure validator availability. 
- // } - - if talliedVotingPower > votingPowerNeeded { - return nil - } + seed := hashToSeed(MakeRoundHash(proofHash, height, round)) + candidates := make([]tmrand.Candidate, len(vals.Validators)) + for i, val := range vals.Validators { + candidates[i] = &candidate{ + priority: uint64(val.StakingPower), + val: val, // don't need to assign the copy } } - - return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} -} - -func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { - if err := commit.ValidateBasic(); err != nil { - return err - } - if height != commit.Height { - return NewErrInvalidCommitHeight(height, commit.Height) - } - if !blockID.Equals(commit.BlockID) { - return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", - blockID, commit.BlockID) - } - return nil -} - -//----------------- - -// IsErrNotEnoughVotingPowerSigned returns true if err is -// ErrNotEnoughVotingPowerSigned. -func IsErrNotEnoughVotingPowerSigned(err error) bool { - _, ok := errors.Cause(err).(ErrNotEnoughVotingPowerSigned) - return ok -} - -// ErrNotEnoughVotingPowerSigned is returned when not enough validators signed -// a commit. -type ErrNotEnoughVotingPowerSigned struct { - Got int64 - Needed int64 -} - -func (e ErrNotEnoughVotingPowerSigned) Error() string { - return fmt.Sprintf("invalid commit -- insufficient voting power: got %d, needed more than %d", e.Got, e.Needed) + samples := tmrand.RandomSamplingWithPriority(seed, candidates, 1, uint64(vals.TotalStakingPower())) + return samples[0].(*candidate).val } //---------------- @@ -824,17 +632,17 @@ func (vals *ValidatorSet) String() string { return vals.StringIndented("") } -// StringIndented returns an intended string representation of ValidatorSet. +// StringIndented returns an intended string representation of VoterSet. 
func (vals *ValidatorSet) StringIndented(indent string) string { if vals == nil { - return "nil-ValidatorSet" + return "nil-VoterSet" } var valStrings []string vals.Iterate(func(index int, val *Validator) bool { valStrings = append(valStrings, val.String()) return false }) - return fmt.Sprintf(`ValidatorSet{ + return fmt.Sprintf(`VoterSet{ %s Validators: %s %v %s}`, @@ -924,60 +732,3 @@ func safeSubClip(a, b int64) int64 { } return c } - -// candidate save simple validator data for selecting proposer -type candidate struct { - idx int - address Address - votingPower int64 -} - -func (c *candidate) Priority() uint64 { - // TODO Is it possible to have a negative VotingPower? - if c.votingPower < 0 { - return 0 - } - return uint64(c.votingPower) -} - -func (c *candidate) LessThan(other tmrand.Candidate) bool { - o, ok := other.(*candidate) - if !ok { - panic("incompatible type") - } - return bytes.Compare(c.address, o.address) < 0 -} - -func SelectProposer(validators *ValidatorSet, proofHash []byte, height int64, round int) *Validator { - if validators.IsNilOrEmpty() { - panic("empty validator set") - } - seed := hashToSeed(MakeRoundHash(proofHash, height, round)) - candidates := make([]tmrand.Candidate, len(validators.Validators)) - for i, val := range validators.Validators { - candidates[i] = &candidate{idx: i, address: val.Address, votingPower: val.VotingPower} - } - vals := tmrand.RandomSamplingWithPriority(seed, candidates, 1, uint64(validators.TotalVotingPower())) - proposerIdx := vals[0].(*candidate).idx - return validators.Validators[proposerIdx] -} - -func hashToSeed(hash []byte) uint64 { - for len(hash) < 8 { - hash = append(hash, byte(0)) - } - return binary.LittleEndian.Uint64(hash[:8]) -} - -// MakeRoundHash combines the VRF hash, block height, and round to create a hash value for each round. This value is -// used for random sampling of the Proposer. 
-func MakeRoundHash(proofHash []byte, height int64, round int) []byte { - b := make([]byte, 16) - binary.LittleEndian.PutUint64(b, uint64(height)) - binary.LittleEndian.PutUint64(b[8:], uint64(round)) - hash := tmhash.New() - hash.Write(proofHash) - hash.Write(b[:8]) - hash.Write(b[8:16]) - return hash.Sum(nil) -} diff --git a/types/validator_set_test.go b/types/validator_set_test.go index 468d33f59..a173425e4 100644 --- a/types/validator_set_test.go +++ b/types/validator_set_test.go @@ -10,8 +10,11 @@ import ( "strings" "testing" "testing/quick" + "time" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/crypto" "github.com/tendermint/tendermint/crypto/ed25519" tmmath "github.com/tendermint/tendermint/libs/math" @@ -19,16 +22,33 @@ import ( tmtime "github.com/tendermint/tendermint/types/time" ) +func TestMaxVotingPowerTest(t *testing.T) { + large := MaxTotalStakingPower + maxDiff := int64(0) + for i := 0; i < 8; i++ { + for j := 0; j < 8; j++ { + testNum := (large - int64(i)) >> j + casted := int64(float64(testNum)) + t.Logf("org=%d, casting=%d", testNum, casted) + if maxDiff < casted-testNum { + maxDiff = casted - testNum + } + } + } + t.Logf("max difference=%d", maxDiff) + assert.True(t, MaxTotalStakingPower+maxDiff <= MaxTotalVotingPower) +} + func TestValidatorSetBasic(t *testing.T) { // empty or nil validator lists are allowed, // but attempting to IncrementProposerPriority on them will panic. 
vset := NewValidatorSet([]*Validator{}) assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Panics(t, func() { SelectProposer(vset, []byte{}, 1, 0) }) + assert.Panics(t, func() { vset.SelectProposer([]byte{}, 1, 0) }) vset = NewValidatorSet(nil) assert.Panics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Panics(t, func() { SelectProposer(vset, []byte{}, 1, 0) }) + assert.Panics(t, func() { vset.SelectProposer([]byte{}, 1, 0) }) assert.EqualValues(t, vset, vset.Copy()) assert.False(t, vset.HasAddress([]byte("some val"))) @@ -45,11 +65,11 @@ func TestValidatorSetBasic(t *testing.T) { assert.Nil(t, addr) assert.Nil(t, val) assert.Zero(t, vset.Size()) - assert.Equal(t, int64(0), vset.TotalVotingPower()) + assert.Equal(t, int64(0), vset.TotalStakingPower()) assert.Nil(t, vset.Hash()) // add - val = randValidator(vset.TotalVotingPower()) + val = randValidator(vset.TotalStakingPower()) assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) assert.True(t, vset.HasAddress(val.Address)) @@ -58,16 +78,17 @@ func TestValidatorSetBasic(t *testing.T) { addr, _ = vset.GetByIndex(0) assert.Equal(t, []byte(val.Address), addr) assert.Equal(t, 1, vset.Size()) - assert.Equal(t, val.VotingPower, vset.TotalVotingPower()) + assert.Equal(t, val.StakingPower, vset.TotalStakingPower()) assert.NotNil(t, vset.Hash()) assert.NotPanics(t, func() { vset.IncrementProposerPriority(1) }) - assert.Equal(t, val.Address, SelectProposer(vset, []byte{}, 1, 0).Address) + assert.Equal(t, val.Address, + vset.SelectProposer([]byte{}, 1, 0).Address) // update - val = randValidator(vset.TotalVotingPower()) + val = randValidator(vset.TotalStakingPower()) assert.NoError(t, vset.UpdateWithChangeSet([]*Validator{val})) _, val = vset.GetByAddress(val.Address) - val.VotingPower += 100 + val.StakingPower += 100 proposerPriority := val.ProposerPriority val.ProposerPriority = 0 @@ -81,14 +102,14 @@ func TestCopy(t *testing.T) { vset := randValidatorSet(10) vsetHash := 
vset.Hash() if len(vsetHash) == 0 { - t.Fatalf("ValidatorSet had unexpected zero hash") + t.Fatalf("VoterSet had unexpected zero hash") } vsetCopy := vset.Copy() vsetCopyHash := vsetCopy.Hash() if !bytes.Equal(vsetHash, vsetCopyHash) { - t.Fatalf("ValidatorSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash) + t.Fatalf("VoterSet copy had wrong hash. Orig: %X, Copy: %X", vsetHash, vsetCopyHash) } } @@ -142,7 +163,7 @@ func bytesToInt(b []byte) int { func verifyWinningRate(t *testing.T, vals *ValidatorSet, tries int, error float64) { selected := make([]int, len(vals.Validators)) for i := 0; i < tries; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) for j := 0; j < len(vals.Validators); j++ { if bytes.Equal(prop.Address, vals.Validators[j].Address) { selected[j]++ @@ -156,7 +177,7 @@ func verifyWinningRate(t *testing.T, vals *ValidatorSet, tries int, error float6 } for i := 0; i < len(actual); i++ { - expected := float64(vals.Validators[i].VotingPower) / float64(vals.TotalVotingPower()) + expected := float64(vals.Validators[i].StakingPower) / float64(vals.TotalStakingPower()) if math.Abs(expected-actual[i]) > expected*error { t.Errorf("The winning rate is too far off from expected: %f ∉ %f±%f", actual[i], expected, expected*error) @@ -172,13 +193,14 @@ func TestProposerSelection1(t *testing.T) { }) var proposers []string for i := 0; i < 99; i++ { - val := SelectProposer(vset, []byte{}, int64(i), 0) + val := vset.SelectProposer([]byte{}, int64(i), 0) proposers = append(proposers, string(val.Address)) } expected := `foo foo foo foo bar bar foo bar foo baz bar foo baz baz baz foo foo bar foo bar baz bar foo baz foo ` + `foo baz foo foo baz foo foo baz bar foo foo foo baz foo baz baz bar foo foo foo foo baz bar bar bar bar foo ` + `foo foo baz foo foo foo foo foo foo baz foo foo baz bar bar foo bar foo foo baz bar foo foo baz foo foo baz ` + `foo foo bar foo foo baz foo foo foo bar foo foo 
baz baz foo foo bar baz foo baz` + if expected != strings.Join(proposers, " ") { t.Errorf("expected sequence of proposers was\n%v\nbut got \n%v", expected, strings.Join(proposers, " ")) } @@ -195,7 +217,7 @@ func TestProposerSelection2(t *testing.T) { vals := NewValidatorSet(valList) expected := []int{0, 1, 0, 0, 2, 2, 0, 2, 1, 2, 2, 1, 2, 2, 2} for i := 0; i < len(valList)*5; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) if bytesToInt(prop.Address) != expected[i] { t.Fatalf("(%d): Expected %d. Got %d", i, expected[i], bytesToInt(prop.Address)) } @@ -210,7 +232,7 @@ func TestProposerSelection2(t *testing.T) { // One validator has more than the others *val2 = *newValidator(addr2, 401) vals = NewValidatorSet(valList) - verifyWinningRate(t, vals, 100000, 0.01) + verifyWinningRate(t, vals, 10000, 0.01) // each validator should be the proposer a proportional number of times val0, val1, val2 = newValidator(addr0, 4), newValidator(addr1, 5), newValidator(addr2, 3) @@ -219,15 +241,15 @@ func TestProposerSelection2(t *testing.T) { vals = NewValidatorSet(valList) N := 4 + 5 + 3 for i := 0; i < 10000*N; i++ { - prop := SelectProposer(vals, []byte{}, int64(i), 0) + prop := vals.SelectProposer([]byte{}, int64(i), 0) propCount[bytesToInt(prop.Address)]++ } - fmt.Printf("%v", propCount) + fmt.Printf("%v\n", propCount) if propCount[0] != 40257 { t.Fatalf( "Expected prop count for validator with 4/12 of voting power to be %d/%d. Got %d/%d", - 40185, + 40038, 10000*N, propCount[0], 10000*N, @@ -236,7 +258,7 @@ func TestProposerSelection2(t *testing.T) { if propCount[1] != 50017 { t.Fatalf( "Expected prop count for validator with 5/12 of voting power to be %d/%d. Got %d/%d", - 40185, + 50077, 10000*N, propCount[1], 10000*N, @@ -245,7 +267,7 @@ func TestProposerSelection2(t *testing.T) { if propCount[2] != 29726 { t.Fatalf( "Expected prop count for validator with 3/12 of voting power to be %d/%d. 
Got %d/%d", - 40185, + 29885, 10000*N, propCount[2], 10000*N, @@ -254,7 +276,7 @@ func TestProposerSelection2(t *testing.T) { } func newValidator(address []byte, power int64) *Validator { - return &Validator{Address: address, VotingPower: power, PubKey: randPubKey()} + return &Validator{Address: address, StakingPower: power, PubKey: randPubKey()} } func randPubKey() crypto.PubKey { @@ -263,24 +285,54 @@ func randPubKey() crypto.PubKey { return ed25519.PubKeyEd25519(pubKey) } -func randValidator(totalVotingPower int64) *Validator { - // this modulo limits the ProposerPriority/VotingPower to stay in the - // bounds of MaxTotalVotingPower minus the already existing voting power: - val := NewValidator(randPubKey(), int64(tmrand.Uint64()%uint64((MaxTotalVotingPower-totalVotingPower)))) - val.ProposerPriority = tmrand.Int64() % (MaxTotalVotingPower - totalVotingPower) +func defendLimit(a int64) int64 { + if a <= 0 { + return 1 + } + if a > MaxTotalStakingPower/8 { + a = MaxTotalStakingPower / 8 + } + return a +} + +func randValidator(totalStakingPower int64) *Validator { + // this modulo limits the ProposerPriority/StakingPower to stay in the + // bounds of MaxTotalStakingPower minus the already existing voting power: + stakingPower := defendLimit(int64(tmrand.Uint64() % uint64(MaxTotalStakingPower-totalStakingPower))) + val := NewValidator(randPubKey(), stakingPower) + val.ProposerPriority = stakingPower return val } func randValidatorSet(numValidators int) *ValidatorSet { validators := make([]*Validator, numValidators) - totalVotingPower := int64(0) + totalStakingPower := int64(0) for i := 0; i < numValidators; i++ { - validators[i] = randValidator(totalVotingPower) - totalVotingPower += validators[i].VotingPower + validators[i] = randValidator(totalStakingPower) + totalStakingPower += validators[i].StakingPower } return NewValidatorSet(validators) } +func randValidatorWithMinMax(min, max int64) (*Validator, PrivValidator) { + privVal := NewMockPV() + pubKey, _ := 
privVal.GetPubKey() + val := NewValidator(pubKey, min+int64(tmrand.Uint64()%uint64(1+max-min))) + val.ProposerPriority = min + tmrand.Int64()%max + return val, privVal +} + +func randValidatorSetWithMinMax(numValidators int, min, max int64) (*ValidatorSet, map[string]PrivValidator) { + validators := make([]*Validator, numValidators) + privMap := make(map[string]PrivValidator) + var privVal PrivValidator + for i := 0; i < numValidators; i++ { + validators[i], privVal = randValidatorWithMinMax(min, max) + privMap[validators[i].Address.String()] = privVal + } + return NewValidatorSet(validators), privMap +} + func (vals *ValidatorSet) toBytes() []byte { bz, err := cdc.MarshalBinaryLengthPrefixed(vals) if err != nil { @@ -299,14 +351,14 @@ func (vals *ValidatorSet) fromBytes(b []byte) { //------------------------------------------------------------------- -func TestValidatorSetTotalVotingPowerPanicsOnOverflow(t *testing.T) { - // NewValidatorSet calls IncrementProposerPriority which calls TotalVotingPower() +func TestValidatorSetTotalStakingPowerPanicsOnOverflow(t *testing.T) { + // NewValidatorSet calls IncrementProposerPriority which calls TotalStakingPower() // which should panic on overflows: shouldPanic := func() { NewValidatorSet([]*Validator{ - {Address: []byte("a"), VotingPower: math.MaxInt64, ProposerPriority: 0}, - {Address: []byte("b"), VotingPower: math.MaxInt64, ProposerPriority: 0}, - {Address: []byte("c"), VotingPower: math.MaxInt64, ProposerPriority: 0}, + {Address: []byte("a"), StakingPower: math.MaxInt64, ProposerPriority: 0}, + {Address: []byte("b"), StakingPower: math.MaxInt64, ProposerPriority: 0}, + {Address: []byte("c"), StakingPower: math.MaxInt64, ProposerPriority: 0}, }) } @@ -390,7 +442,7 @@ func TestAveragingInIncrementProposerPriority(t *testing.T) { } } -func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { +func TestAveragingInIncrementProposerPriorityWithStakingPower(t *testing.T) { // Other than 
TestAveragingInIncrementProposerPriority this is a more complete test showing // how each ProposerPriority changes in relation to the validator's voting power respectively. // average is zero in each round: @@ -400,9 +452,9 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { total := vp0 + vp1 + vp2 avg := (vp0 + vp1 + vp2 - total) / 3 vals := ValidatorSet{Validators: []*Validator{ - {Address: []byte{0}, ProposerPriority: 0, VotingPower: vp0}, - {Address: []byte{1}, ProposerPriority: 0, VotingPower: vp1}, - {Address: []byte{2}, ProposerPriority: 0, VotingPower: vp2}}} + {Address: []byte{0}, ProposerPriority: 0, StakingPower: vp0}, + {Address: []byte{1}, ProposerPriority: 0, StakingPower: vp1}, + {Address: []byte{2}, ProposerPriority: 0, StakingPower: vp2}}} tcs := []struct { vals *ValidatorSet wantProposerPrioritys []int64 @@ -413,7 +465,7 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { 0: { vals.Copy(), []int64{ - // Acumm+VotingPower-Avg: + // Acumm+StakingPower-Avg: 0 + vp0 - total - avg, // mostest will be subtracted by total voting power (12) 0 + vp1, 0 + vp2}, @@ -447,7 +499,7 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { vals.Copy(), []int64{ 0 + 4*(vp0-total) + vp0, // 4 iters was mostest - 0 + 5*vp1 - total, // now this val is mostest for the 1st time (hence -12==totalVotingPower) + 0 + 5*vp1 - total, // now this val is mostest for the 1st time (hence -12==totalStakingPower) 0 + 5*vp2}, 5, vals.Validators[2]}, @@ -503,9 +555,11 @@ func TestAveragingInIncrementProposerPriorityWithVotingPower(t *testing.T) { for i, tc := range tcs { tc.vals.IncrementProposerPriority(tc.times) - assert.Equal(t, tc.wantProposer.Address, SelectProposer(tc.vals, []byte{}, int64(i), 0).Address, + assert.Equal(t, tc.wantProposer.Address, + tc.vals.SelectProposer([]byte{}, int64(i), 0).Address, "test case: %v", i) + for valIdx, val := range tc.vals.Validators { assert.Equal(t, 
tc.wantProposerPrioritys[valIdx], @@ -546,7 +600,7 @@ func TestValidatorSetVerifyCommit(t *testing.T) { privKey := ed25519.GenPrivKey() pubKey := privKey.PubKey() v1 := NewValidator(pubKey, 1000) - vset := NewValidatorSet([]*Validator{v1}) + vset := ToVoterAll([]*Validator{v1}) // good var ( @@ -700,11 +754,11 @@ func verifyValidatorSet(t *testing.T, valSet *ValidatorSet) { assert.Equal(t, len(valSet.Validators), cap(valSet.Validators)) // verify that the set's total voting power has been updated - tvp := valSet.totalVotingPower - valSet.updateTotalVotingPower() - expectedTvp := valSet.TotalVotingPower() + tvp := valSet.totalStakingPower + valSet.updateTotalStakingPower() + expectedTvp := valSet.TotalStakingPower() assert.Equal(t, expectedTvp, tvp, - "expected TVP %d. Got %d, valSet=%s", expectedTvp, tvp, valSet) + "expected TVP %d. Got %d, voterSet=%s", expectedTvp, tvp, valSet) // verify that validator priorities are centered valsCount := int64(len(valSet.Validators)) @@ -722,7 +776,7 @@ func toTestValList(valList []*Validator) []testVal { testList := make([]testVal, len(valList)) for i, val := range valList { testList[i].name = string(val.Address) - testList[i].power = val.VotingPower + testList[i].power = val.StakingPower } return testList } @@ -814,7 +868,7 @@ func TestValSetUpdatesDuplicateEntries(t *testing.T) { } func TestValSetUpdatesOverflows(t *testing.T) { - maxVP := MaxTotalVotingPower + maxVP := MaxTotalStakingPower testCases := []valSetErrTestCase{ { // single update leading to overflow testValSet(2, 10), @@ -922,7 +976,7 @@ func TestValSetUpdatesBasicTestsExecute(t *testing.T) { // is changed in the list of validators previously passed as parameter to UpdateWithChangeSet. // this is to make sure copies of the validators are made by UpdateWithChangeSet. 
if len(valList) > 0 { - valList[0].VotingPower++ + valList[0].StakingPower++ assert.Equal(t, toTestValList(valListCopy), toTestValList(valSet.Validators), "test %v", i) } @@ -1218,39 +1272,39 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { testCases := []testVSetCfg{ { name: "1 no false overflow error messages for updates", - startVals: []testVal{{"v1", 1}, {"v2", MaxTotalVotingPower - 1}}, - updatedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}}, - expectedVals: []testVal{{"v1", MaxTotalVotingPower - 1}, {"v2", 1}}, + startVals: []testVal{{"v1", 1}, {"v2", MaxTotalStakingPower - 1}}, + updatedVals: []testVal{{"v1", MaxTotalStakingPower - 1}, {"v2", 1}}, + expectedVals: []testVal{{"v1", MaxTotalStakingPower - 1}, {"v2", 1}}, wantErr: false, }, { // this test shows that it is important to apply the updates in the order of the change in power // i.e. apply first updates with decreases in power, v2 change in this case. name: "2 no false overflow error messages for updates", - startVals: []testVal{{"v1", 1}, {"v2", MaxTotalVotingPower - 1}}, - updatedVals: []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}}, - expectedVals: []testVal{{"v1", MaxTotalVotingPower/2 - 1}, {"v2", MaxTotalVotingPower / 2}}, + startVals: []testVal{{"v1", 1}, {"v2", MaxTotalStakingPower - 1}}, + updatedVals: []testVal{{"v1", MaxTotalStakingPower/2 - 1}, {"v2", MaxTotalStakingPower / 2}}, + expectedVals: []testVal{{"v1", MaxTotalStakingPower/2 - 1}, {"v2", MaxTotalStakingPower / 2}}, wantErr: false, }, { name: "3 no false overflow error messages for deletes", - startVals: []testVal{{"v1", MaxTotalVotingPower - 2}, {"v2", 1}, {"v3", 1}}, + startVals: []testVal{{"v1", MaxTotalStakingPower - 2}, {"v2", 1}, {"v3", 1}}, deletedVals: []testVal{{"v1", 0}}, - addedVals: []testVal{{"v4", MaxTotalVotingPower - 2}}, - expectedVals: []testVal{{"v2", 1}, {"v3", 1}, {"v4", MaxTotalVotingPower - 2}}, + addedVals: []testVal{{"v4", MaxTotalStakingPower - 2}}, + 
expectedVals: []testVal{{"v2", 1}, {"v3", 1}, {"v4", MaxTotalStakingPower - 2}}, wantErr: false, }, { name: "4 no false overflow error messages for adds, updates and deletes", startVals: []testVal{ - {"v1", MaxTotalVotingPower / 4}, {"v2", MaxTotalVotingPower / 4}, - {"v3", MaxTotalVotingPower / 4}, {"v4", MaxTotalVotingPower / 4}}, + {"v1", MaxTotalStakingPower / 4}, {"v2", MaxTotalStakingPower / 4}, + {"v3", MaxTotalStakingPower / 4}, {"v4", MaxTotalStakingPower / 4}}, deletedVals: []testVal{{"v2", 0}}, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}}, + {"v1", MaxTotalStakingPower/2 - 2}, {"v3", MaxTotalStakingPower/2 - 3}, {"v4", 2}}, addedVals: []testVal{{"v5", 3}}, expectedVals: []testVal{ - {"v1", MaxTotalVotingPower/2 - 2}, {"v3", MaxTotalVotingPower/2 - 3}, {"v4", 2}, {"v5", 3}}, + {"v1", MaxTotalStakingPower/2 - 2}, {"v3", MaxTotalStakingPower/2 - 3}, {"v4", 2}, {"v5", 3}}, wantErr: false, }, { @@ -1259,9 +1313,9 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, updatedVals: []testVal{ - {"v1", MaxTotalVotingPower}, {"v2", MaxTotalVotingPower}, {"v3", MaxTotalVotingPower}, - {"v4", MaxTotalVotingPower}, {"v5", MaxTotalVotingPower}, {"v6", MaxTotalVotingPower}, - {"v7", MaxTotalVotingPower}, {"v8", MaxTotalVotingPower}, {"v9", 8}}, + {"v1", MaxTotalStakingPower}, {"v2", MaxTotalStakingPower}, {"v3", MaxTotalStakingPower}, + {"v4", MaxTotalStakingPower}, {"v5", MaxTotalStakingPower}, {"v6", MaxTotalStakingPower}, + {"v7", MaxTotalStakingPower}, {"v8", MaxTotalStakingPower}, {"v9", 8}}, expectedVals: []testVal{ {"v1", 1}, {"v2", 1}, {"v3", 1}, {"v4", 1}, {"v5", 1}, {"v6", 1}, {"v7", 1}, {"v8", 1}, {"v9", 1}}, @@ -1285,6 +1339,51 @@ func TestValSetUpdateOverflowRelated(t *testing.T) { } } +func TestVerifyCommitTrusting(t *testing.T) { + var ( + blockID = makeBlockIDRandom() + voteSet, _, 
originalVoterSet, privValidators = randVoteSet(1, 1, PrecommitType, 6, 1) + commit, err = MakeCommit(blockID, 1, 1, voteSet, privValidators, time.Now()) + _, newVoterSet, _ = RandVoterSet(2, 1) + ) + require.NoError(t, err) + + testCases := []struct { + //valSet *ValidatorSet + voterSet *VoterSet + err bool + }{ + // good + 0: { + //valSet: originalValset, + voterSet: originalVoterSet, + err: false, + }, + // bad - no overlap between validator sets + 1: { + voterSet: newVoterSet, + err: true, + }, + // good - first two are different but the rest of the same -> >1/3 + 2: { + //voterSet: WrapValidatorsToVoterSet(append(newValSet.Validators, originalValset.Validators...)), + voterSet: WrapValidatorsToVoterSet(append(newVoterSet.Voters, originalVoterSet.Voters...)), + err: false, + }, + } + + for _, tc := range testCases { + err = tc.voterSet.VerifyCommitTrusting("test_chain_id", blockID, commit.Height, commit, + tmmath.Fraction{Numerator: 1, Denominator: 3}) + if tc.err { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + } + +} + //--------------------- // Sort validators by priority and address type validatorsByPriority []*Validator diff --git a/types/vote_set.go b/types/vote_set.go index 82698fe51..1b343f3e5 100644 --- a/types/vote_set.go +++ b/types/vote_set.go @@ -63,7 +63,7 @@ type VoteSet struct { height int64 round int signedMsgType SignedMsgType - valSet *ValidatorSet + voterSet *VoterSet mtx sync.Mutex votesBitArray *bits.BitArray @@ -75,7 +75,7 @@ type VoteSet struct { } // Constructs a new VoteSet struct used to accumulate votes for given height/round. 
-func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, valSet *ValidatorSet) *VoteSet { +func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsgType, voterSet *VoterSet) *VoteSet { if height == 0 { panic("Cannot make VoteSet for height == 0, doesn't make sense.") } @@ -84,12 +84,12 @@ func NewVoteSet(chainID string, height int64, round int, signedMsgType SignedMsg height: height, round: round, signedMsgType: signedMsgType, - valSet: valSet, - votesBitArray: bits.NewBitArray(valSet.Size()), - votes: make([]*Vote, valSet.Size()), + voterSet: voterSet, + votesBitArray: bits.NewBitArray(voterSet.Size()), + votes: make([]*Vote, voterSet.Size()), sum: 0, maj23: nil, - votesByBlock: make(map[string]*blockVotes, valSet.Size()), + votesByBlock: make(map[string]*blockVotes, voterSet.Size()), peerMaj23s: make(map[P2PID]BlockID), } } @@ -127,7 +127,7 @@ func (voteSet *VoteSet) Size() int { if voteSet == nil { return 0 } - return voteSet.valSet.Size() + return voteSet.voterSet.Size() } // Returns added=true if vote is valid and new. @@ -175,10 +175,10 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // Ensure that signer is a validator. - lookupAddr, val := voteSet.valSet.GetByIndex(valIndex) - if val == nil { + lookupAddr, voter := voteSet.voterSet.GetByIndex(valIndex) + if voter == nil { return false, errors.Wrapf(ErrVoteInvalidValidatorIndex, - "Cannot find validator %d in valSet of size %d", valIndex, voteSet.valSet.Size()) + "Cannot find voter %d in voterSet of size %d", valIndex, voteSet.voterSet.Size()) } // Ensure that the signer has the right address. @@ -198,14 +198,14 @@ func (voteSet *VoteSet) addVote(vote *Vote) (added bool, err error) { } // Check signature. 
- if err := vote.Verify(voteSet.chainID, val.PubKey); err != nil { - return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, val.PubKey) + if err := vote.Verify(voteSet.chainID, voter.PubKey); err != nil { + return false, errors.Wrapf(err, "Failed to verify vote with ChainID %s and PubKey %s", voteSet.chainID, voter.PubKey) } // Add vote and get conflicting vote if any. - added, conflicting := voteSet.addVerifiedVote(vote, blockKey, val.VotingPower) + added, conflicting := voteSet.addVerifiedVote(vote, blockKey, voter.VotingPower) if conflicting != nil { - return added, NewConflictingVoteError(val, conflicting, vote) + return added, NewConflictingVoteError(voter, conflicting, vote) } if !added { panic("Expected to add non-conflicting vote") @@ -269,14 +269,14 @@ func (voteSet *VoteSet) addVerifiedVote( } // ... and there's no conflicting vote. // Start tracking this blockKey - votesByBlock = newBlockVotes(false, voteSet.valSet.Size()) + votesByBlock = newBlockVotes(false, voteSet.voterSet.Size()) voteSet.votesByBlock[blockKey] = votesByBlock // We'll add the vote in a bit. } // Before adding to votesByBlock, see if we'll exceed quorum origSum := votesByBlock.sum - quorum := voteSet.valSet.TotalVotingPower()*2/3 + 1 + quorum := voteSet.voterSet.TotalVotingPower()*2/3 + 1 // Add vote to votesByBlock votesByBlock.addVerifiedVote(vote, votingPower) @@ -332,7 +332,7 @@ func (voteSet *VoteSet) SetPeerMaj23(peerID P2PID, blockID BlockID) error { votesByBlock.peerMaj23 = true // No need to copy votes, already there. } else { - votesByBlock = newBlockVotes(true, voteSet.valSet.Size()) + votesByBlock = newBlockVotes(true, voteSet.voterSet.Size()) voteSet.votesByBlock[blockKey] = votesByBlock // No need to copy votes, no votes to copy over. 
} @@ -379,7 +379,7 @@ func (voteSet *VoteSet) GetByAddress(address []byte) *Vote { } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - valIndex, val := voteSet.valSet.GetByAddress(address) + valIndex, val := voteSet.voterSet.GetByAddress(address) if val == nil { panic("GetByAddress(address) returned nil") } @@ -414,13 +414,13 @@ func (voteSet *VoteSet) HasTwoThirdsAny() bool { } voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - return voteSet.sum > voteSet.valSet.TotalVotingPower()*2/3 + return voteSet.sum > voteSet.voterSet.TotalVotingPower()*2/3 } func (voteSet *VoteSet) HasAll() bool { voteSet.mtx.Lock() defer voteSet.mtx.Unlock() - return voteSet.sum == voteSet.valSet.TotalVotingPower() + return voteSet.sum == voteSet.voterSet.TotalVotingPower() } // If there was a +2/3 majority for blockID, return blockID and true. @@ -539,7 +539,7 @@ func (voteSet *VoteSet) StringShort() string { // return the power voted, the total, and the fraction func (voteSet *VoteSet) sumTotalFrac() (int64, int64, float64) { - voted, total := voteSet.sum, voteSet.valSet.TotalVotingPower() + voted, total := voteSet.sum, voteSet.voterSet.TotalVotingPower() fracVoted := float64(voted) / float64(total) return voted, total, fracVoted } @@ -586,11 +586,11 @@ type blockVotes struct { sum int64 // vote sum } -func newBlockVotes(peerMaj23 bool, numValidators int) *blockVotes { +func newBlockVotes(peerMaj23 bool, numVoters int) *blockVotes { return &blockVotes{ peerMaj23: peerMaj23, - bitArray: bits.NewBitArray(numValidators), - votes: make([]*Vote, numValidators), + bitArray: bits.NewBitArray(numVoters), + votes: make([]*Vote, numVoters), sum: 0, } } diff --git a/types/vote_set_test.go b/types/vote_set_test.go index ab4433a39..0153f38d1 100644 --- a/types/vote_set_test.go +++ b/types/vote_set_test.go @@ -5,6 +5,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/tendermint/tendermint/crypto" tmrand 
"github.com/tendermint/tendermint/libs/rand" @@ -18,9 +19,9 @@ func randVoteSet( signedMsgType SignedMsgType, numValidators int, votingPower int64, -) (*VoteSet, *ValidatorSet, []PrivValidator) { - valSet, privValidators := RandValidatorSet(numValidators, votingPower) - return NewVoteSet("test_chain_id", height, round, signedMsgType, valSet), valSet, privValidators +) (*VoteSet, *ValidatorSet, *VoterSet, []PrivValidator) { + valSet, voterSet, privValidators := RandVoterSet(numValidators, votingPower) + return NewVoteSet("test_chain_id", height, round, signedMsgType, voterSet), valSet, voterSet, privValidators } // Convenience: Return new vote with different validator address/index @@ -68,12 +69,15 @@ func withBlockPartsHeader(vote *Vote, blockPartsHeader PartSetHeader) *Vote { func TestAddVote(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) val0 := privValidators[0] // t.Logf(">> %v", voteSet) - val0Addr := val0.GetPubKey().Address() + val0p, err := val0.GetPubKey() + require.NoError(t, err) + val0Addr := val0p.Address() + if voteSet.GetByAddress(val0Addr) != nil { t.Errorf("expected GetByAddress(val0.Address) to be nil") } @@ -94,7 +98,7 @@ func TestAddVote(t *testing.T) { Timestamp: tmtime.Now(), BlockID: BlockID{nil, PartSetHeader{}}, } - _, err := signAddVote(val0, vote, voteSet) + _, err = signAddVote(val0, vote, voteSet) if err != nil { t.Error(err) } @@ -113,7 +117,7 @@ func TestAddVote(t *testing.T) { func Test2_3Majority(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, // NOTE: must fill in @@ -126,9 +130,11 @@ func Test2_3Majority(t *testing.T) { } // 6 out of 10 voted for nil. 
for i := 0; i < 6; i++ { - addr := privValidators[i].GetPubKey().Address() + pubKey, err := privValidators[i].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -140,9 +146,11 @@ func Test2_3Majority(t *testing.T) { // 7th validator voted for some blockhash { - addr := privValidators[6].GetPubKey().Address() + pubKey, err := privValidators[6].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 6) - _, err := signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(privValidators[6], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if err != nil { t.Error(err) } @@ -154,9 +162,11 @@ func Test2_3Majority(t *testing.T) { // 8th validator voted for nil. { - addr := privValidators[7].GetPubKey().Address() + pubKey, err := privValidators[7].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 7) - _, err := signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(privValidators[7], vote, voteSet) if err != nil { t.Error(err) } @@ -169,7 +179,7 @@ func Test2_3Majority(t *testing.T) { func Test2_3MajorityRedux(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 100, 1) blockHash := crypto.CRandBytes(32) blockPartsTotal := 123 @@ -187,9 +197,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 66 out of 100 voted for nil. 
for i := 0; i < 66; i++ { - addr := privValidators[i].GetPubKey().Address() + pubKey, err := privValidators[i].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -201,9 +213,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 67th validator voted for nil { - adrr := privValidators[66].GetPubKey().Address() + pubKey, err := privValidators[66].GetPubKey() + require.NoError(t, err) + adrr := pubKey.Address() vote := withValidator(voteProto, adrr, 66) - _, err := signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) + _, err = signAddVote(privValidators[66], withBlockHash(vote, nil), voteSet) if err != nil { t.Error(err) } @@ -215,10 +229,12 @@ func Test2_3MajorityRedux(t *testing.T) { // 68th validator voted for a different BlockParts PartSetHeader { - addr := privValidators[67].GetPubKey().Address() + pubKey, err := privValidators[67].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 67) blockPartsHeader := PartSetHeader{blockPartsTotal, crypto.CRandBytes(32)} - _, err := signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err = signAddVote(privValidators[67], withBlockPartsHeader(vote, blockPartsHeader), voteSet) if err != nil { t.Error(err) } @@ -230,10 +246,12 @@ func Test2_3MajorityRedux(t *testing.T) { // 69th validator voted for different BlockParts Total { - addr := privValidators[68].GetPubKey().Address() + pubKey, err := privValidators[68].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 68) blockPartsHeader := PartSetHeader{blockPartsTotal + 1, blockPartsHeader.Hash} - _, err := signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) + _, err = 
signAddVote(privValidators[68], withBlockPartsHeader(vote, blockPartsHeader), voteSet) if err != nil { t.Error(err) } @@ -245,9 +263,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 70th validator voted for different BlockHash { - addr := privValidators[69].GetPubKey().Address() + pubKey, err := privValidators[69].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 69) - _, err := signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) + _, err = signAddVote(privValidators[69], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if err != nil { t.Error(err) } @@ -259,9 +279,11 @@ func Test2_3MajorityRedux(t *testing.T) { // 71st validator voted for the right BlockHash & BlockPartsHeader { - addr := privValidators[70].GetPubKey().Address() + pubKey, err := privValidators[70].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 70) - _, err := signAddVote(privValidators[70], vote, voteSet) + _, err = signAddVote(privValidators[70], vote, voteSet) if err != nil { t.Error(err) } @@ -274,7 +296,7 @@ func Test2_3MajorityRedux(t *testing.T) { func TestBadVotes(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 10, 1) voteProto := &Vote{ ValidatorAddress: nil, @@ -288,7 +310,9 @@ func TestBadVotes(t *testing.T) { // val0 votes for nil. { - addr := privValidators[0].GetPubKey().Address() + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) added, err := signAddVote(privValidators[0], vote, voteSet) if !added || err != nil { @@ -298,7 +322,9 @@ func TestBadVotes(t *testing.T) { // val0 votes again for some block. 
{ - addr := privValidators[0].GetPubKey().Address() + pubKey, err := privValidators[0].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 0) added, err := signAddVote(privValidators[0], withBlockHash(vote, tmrand.Bytes(32)), voteSet) if added || err == nil { @@ -308,7 +334,9 @@ func TestBadVotes(t *testing.T) { // val1 votes on another height { - addr := privValidators[1].GetPubKey().Address() + pubKey, err := privValidators[1].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 1) added, err := signAddVote(privValidators[1], withHeight(vote, height+1), voteSet) if added || err == nil { @@ -318,7 +346,9 @@ func TestBadVotes(t *testing.T) { // val2 votes on another round { - addr := privValidators[2].GetPubKey().Address() + pubKey, err := privValidators[2].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withRound(vote, round+1), voteSet) if added || err == nil { @@ -328,7 +358,9 @@ func TestBadVotes(t *testing.T) { // val3 votes of another type. 
{ - addr := privValidators[3].GetPubKey().Address() + pubKey, err := privValidators[3].GetPubKey() + require.NoError(t, err) + addr := pubKey.Address() vote := withValidator(voteProto, addr, 3) added, err := signAddVote(privValidators[3], withType(vote, byte(PrecommitType)), voteSet) if added || err == nil { @@ -339,7 +371,7 @@ func TestBadVotes(t *testing.T) { func TestConflicts(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrevoteType, 4, 1) blockHash1 := tmrand.Bytes(32) blockHash2 := tmrand.Bytes(32) @@ -353,7 +385,10 @@ func TestConflicts(t *testing.T) { BlockID: BlockID{nil, PartSetHeader{}}, } - val0Addr := privValidators[0].GetPubKey().Address() + val0, err := privValidators[0].GetPubKey() + require.NoError(t, err) + val0Addr := val0.Address() + // val0 votes for nil. { vote := withValidator(voteProto, val0Addr, 0) @@ -407,7 +442,9 @@ func TestConflicts(t *testing.T) { // val1 votes for blockHash1. { - addr := privValidators[1].GetPubKey().Address() + pv, err := privValidators[1].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 1) added, err := signAddVote(privValidators[1], withBlockHash(vote, blockHash1), voteSet) if !added || err != nil { @@ -425,7 +462,9 @@ func TestConflicts(t *testing.T) { // val2 votes for blockHash2. { - addr := privValidators[2].GetPubKey().Address() + pv, err := privValidators[2].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash2), voteSet) if !added || err != nil { @@ -446,7 +485,9 @@ func TestConflicts(t *testing.T) { // val2 votes for blockHash1. 
{ - addr := privValidators[2].GetPubKey().Address() + pv, err := privValidators[2].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 2) added, err := signAddVote(privValidators[2], withBlockHash(vote, blockHash1), voteSet) if !added { @@ -473,7 +514,7 @@ func TestConflicts(t *testing.T) { func TestMakeCommit(t *testing.T) { height, round := int64(1), 0 - voteSet, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1) + voteSet, _, _, privValidators := randVoteSet(height, round, PrecommitType, 10, 1) blockHash, blockPartsHeader := crypto.CRandBytes(32), PartSetHeader{123, crypto.CRandBytes(32)} voteProto := &Vote{ @@ -488,9 +529,11 @@ func TestMakeCommit(t *testing.T) { // 6 out of 10 voted for some block. for i := 0; i < 6; i++ { - addr := privValidators[i].GetPubKey().Address() + pv, err := privValidators[i].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, i) - _, err := signAddVote(privValidators[i], vote, voteSet) + _, err = signAddVote(privValidators[i], vote, voteSet) if err != nil { t.Error(err) } @@ -501,12 +544,14 @@ func TestMakeCommit(t *testing.T) { // 7th voted for some other block. { - addr := privValidators[6].GetPubKey().Address() + pv, err := privValidators[6].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 6) vote = withBlockHash(vote, tmrand.Bytes(32)) vote = withBlockPartsHeader(vote, PartSetHeader{123, tmrand.Bytes(32)}) - _, err := signAddVote(privValidators[6], vote, voteSet) + _, err = signAddVote(privValidators[6], vote, voteSet) if err != nil { t.Error(err) } @@ -514,9 +559,11 @@ func TestMakeCommit(t *testing.T) { // The 8th voted like everyone else. 
{ - addr := privValidators[7].GetPubKey().Address() + pv, err := privValidators[7].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 7) - _, err := signAddVote(privValidators[7], vote, voteSet) + _, err = signAddVote(privValidators[7], vote, voteSet) if err != nil { t.Error(err) } @@ -524,11 +571,13 @@ func TestMakeCommit(t *testing.T) { // The 9th voted for nil. { - addr := privValidators[8].GetPubKey().Address() + pv, err := privValidators[8].GetPubKey() + assert.NoError(t, err) + addr := pv.Address() vote := withValidator(voteProto, addr, 8) vote.BlockID = BlockID{} - _, err := signAddVote(privValidators[8], vote, voteSet) + _, err = signAddVote(privValidators[8], vote, voteSet) if err != nil { t.Error(err) } diff --git a/types/vote_test.go b/types/vote_test.go index 40a9d650a..eb4f6a955 100644 --- a/types/vote_test.go +++ b/types/vote_test.go @@ -143,13 +143,14 @@ func TestVoteProposalNotEq(t *testing.T) { func TestVoteVerifySignature(t *testing.T) { privVal := NewMockPV() - pubkey := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey() + require.NoError(t, err) vote := examplePrecommit() signBytes := vote.SignBytes("test_chain_id") // sign it - err := privVal.SignVote("test_chain_id", vote) + err = privVal.SignVote("test_chain_id", vote) require.NoError(t, err) // verify the same vote @@ -193,12 +194,13 @@ func TestIsVoteTypeValid(t *testing.T) { func TestVoteVerify(t *testing.T) { privVal := NewMockPV() - pubkey := privVal.GetPubKey() + pubkey, err := privVal.GetPubKey() + require.NoError(t, err) vote := examplePrevote() vote.ValidatorAddress = pubkey.Address() - err := vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) + err = vote.Verify("test_chain_id", ed25519.GenPrivKey().PubKey()) if assert.Error(t, err) { assert.Equal(t, ErrVoteInvalidValidatorAddress, err) } diff --git a/types/voter_set.go b/types/voter_set.go new file mode 100644 index 000000000..dc2308e88 --- /dev/null +++ 
b/types/voter_set.go @@ -0,0 +1,519 @@ +package types + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + "sort" + "strings" + + "github.com/datastream/probab/dst" + "github.com/pkg/errors" + "github.com/tendermint/tendermint/crypto/merkle" + "github.com/tendermint/tendermint/crypto/tmhash" + tmmath "github.com/tendermint/tendermint/libs/math" + tmrand "github.com/tendermint/tendermint/libs/rand" +) + +// VoterSet represent a set of *Validator at a given height. +type VoterSet struct { + // NOTE: persisted via reflect, must be exported. + Voters []*Validator `json:"voters"` + + // cached (unexported) + totalVotingPower int64 +} + +func WrapValidatorsToVoterSet(vals []*Validator) *VoterSet { + sort.Sort(ValidatorsByAddress(vals)) + voterSet := &VoterSet{Voters: vals, totalVotingPower: 0} + voterSet.updateTotalVotingPower() + return voterSet +} + +// IsNilOrEmpty returns true if validator set is nil or empty. +func (voters *VoterSet) IsNilOrEmpty() bool { + return voters == nil || len(voters.Voters) == 0 +} + +// HasAddress returns true if address given is in the validator set, false - +// otherwise. +func (voters *VoterSet) HasAddress(address []byte) bool { + idx := sort.Search(len(voters.Voters), func(i int) bool { + return bytes.Compare(address, voters.Voters[i].Address) <= 0 + }) + return idx < len(voters.Voters) && bytes.Equal(voters.Voters[idx].Address, address) +} + +// GetByAddress returns an index of the validator with address and validator +// itself if found. Otherwise, -1 and nil are returned. +func (voters *VoterSet) GetByAddress(address []byte) (index int, val *Validator) { + idx := sort.Search(len(voters.Voters), func(i int) bool { + return bytes.Compare(address, voters.Voters[i].Address) <= 0 + }) + if idx < len(voters.Voters) && bytes.Equal(voters.Voters[idx].Address, address) { + return idx, voters.Voters[idx].Copy() + } + return -1, nil +} + +// GetByIndex returns the validator's address and validator itself by index. 
+// It returns nil values if index is less than 0 or greater or equal to +// len(VoterSet.Validators). +func (voters *VoterSet) GetByIndex(index int) (address []byte, val *Validator) { + if index < 0 || index >= len(voters.Voters) { + return nil, nil + } + val = voters.Voters[index] + return val.Address, val.Copy() +} + +// Size returns the length of the validator set. +func (voters *VoterSet) Size() int { + return len(voters.Voters) +} + +func copyValidatorListShallow(vals []*Validator) []*Validator { + result := make([]*Validator, len(vals)) + copy(result, vals) + return result +} + +// VoterSet.Copy() copies validator list shallow +func (voters *VoterSet) Copy() *VoterSet { + return &VoterSet{ + Voters: copyValidatorListShallow(voters.Voters), + totalVotingPower: voters.totalVotingPower, + } +} + +// Forces recalculation of the set's total voting power. +// Panics if total voting power is bigger than MaxTotalVotingPower. +func (voters *VoterSet) updateTotalVotingPower() { + sum := int64(0) + for _, val := range voters.Voters { + // mind overflow + sum = safeAddClip(sum, val.VotingPower) + if sum > MaxTotalVotingPower { + panic(fmt.Sprintf( + "Total voting power should be guarded to not exceed %v; got: %v", + MaxTotalVotingPower, + sum)) + } + } + voters.totalVotingPower = sum +} + +func (voters *VoterSet) TotalVotingPower() int64 { + if voters.totalVotingPower == 0 { + voters.updateTotalVotingPower() + } + return voters.totalVotingPower +} + +// Hash returns the Merkle root hash build using validators (as leaves) in the +// set. +func (voters *VoterSet) Hash() []byte { + if len(voters.Voters) == 0 { + return nil + } + bzs := make([][]byte, len(voters.Voters)) + for i, val := range voters.Voters { + bzs[i] = val.Bytes() + } + return merkle.SimpleHashFromByteSlices(bzs) +} + +// VerifyCommit verifies +2/3 of the set had signed the given commit. 
+func (voters *VoterSet) VerifyCommit(chainID string, blockID BlockID, + height int64, commit *Commit) error { + + if voters.Size() != len(commit.Signatures) { + return NewErrInvalidCommitSignatures(voters.Size(), len(commit.Signatures)) + } + if err := verifyCommitBasic(commit, height, blockID); err != nil { + return err + } + + talliedVotingPower := int64(0) + votingPowerNeeded := voters.TotalVotingPower() * 2 / 3 + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // The vals and commit have a 1-to-1 correspondence. + // This means we don't need the validator address or to do any lookup. + val := voters.Voters[idx] + + // Validate signature. + voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return fmt.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + talliedVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + + // return as soon as +2/3 of the signatures are verified + if talliedVotingPower > votingPowerNeeded { + return nil + } + } + + // talliedVotingPower <= needed, thus return error + return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} +} + +// VerifyFutureCommit will check to see if the set would be valid with a different +// validator set. +// +// vals is the old validator set that we know. Over 2/3 of the power in old +// signed this block. +// +// In Tendermint, 1/3 of the voting power can halt or fork the chain, but 1/3 +// can't make arbitrary state transitions. You still need > 2/3 Byzantine to +// make arbitrary state transitions.
+// +// To preserve this property in the light client, we also require > 2/3 of the +// old vals to sign the future commit at H, that way we preserve the property +// that if they weren't being truthful about the validator set at H (block hash +// -> vals hash) or about the app state (block hash -> app hash) we can slash +// > 2/3. Otherwise, the lite client isn't providing the same security +// guarantees. +// +// Even if we added a slashing condition that if you sign a block header with +// the wrong validator set, then we would only need > 1/3 of signatures from +// the old vals on the new commit, it wouldn't be sufficient because the new +// vals can be arbitrary and commit some arbitrary app hash. +// +// newSet is the validator set that signed this block. Only votes from new are +// sufficient for 2/3 majority in the new set as well, for it to be a valid +// commit. +// +// NOTE: This doesn't check whether the commit is a future commit, because the +// current height isn't part of the VoterSet. Caller must check that the +// commit height is greater than the height for this validator set. +func (voters *VoterSet) VerifyFutureCommit(newSet *VoterSet, chainID string, + blockID BlockID, height int64, commit *Commit) error { + oldVoters := voters + + // Commit must be a valid commit for newSet. + err := newSet.VerifyCommit(chainID, blockID, height, commit) + if err != nil { + return err + } + + // Check old voting power. + oldVotingPower := int64(0) + seen := map[int]bool{} + + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // See if this validator is in oldVals. + oldIdx, val := oldVoters.GetByAddress(commitSig.ValidatorAddress) + if val == nil || seen[oldIdx] { + continue // missing or double vote... + } + seen[oldIdx] = true + + // Validate signature. 
+ voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + oldVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + } + + if got, needed := oldVotingPower, oldVoters.TotalVotingPower()*2/3; got <= needed { + return ErrNotEnoughVotingPowerSigned{Got: got, Needed: needed} + } + return nil +} + +// VerifyCommitTrusting verifies that trustLevel ([1/3, 1]) of the validator +// set signed this commit. +// NOTE the given validators do not necessarily correspond to the validator set +// for this commit, but there may be some intersection. +func (voters *VoterSet) VerifyCommitTrusting(chainID string, blockID BlockID, + height int64, commit *Commit, trustLevel tmmath.Fraction) error { + + if trustLevel.Numerator*3 < trustLevel.Denominator || // < 1/3 + trustLevel.Numerator > trustLevel.Denominator { // > 1 + panic(fmt.Sprintf("trustLevel must be within [1/3, 1], given %v", trustLevel)) + } + + if err := verifyCommitBasic(commit, height, blockID); err != nil { + return err + } + + var ( + talliedVotingPower int64 + seenVals = make(map[int]int, len(commit.Signatures)) // validator index -> commit index + votingPowerNeeded = (voters.TotalVotingPower() * trustLevel.Numerator) / trustLevel.Denominator + ) + + for idx, commitSig := range commit.Signatures { + if commitSig.Absent() { + continue // OK, some signatures can be absent. + } + + // We don't know the validators that committed this block, so we have to + // check for each vote if its validator is already known. 
+ valIdx, val := voters.GetByAddress(commitSig.ValidatorAddress) + + if firstIndex, ok := seenVals[valIdx]; ok { // double vote + secondIndex := idx + return errors.Errorf("double vote from %v (%d and %d)", val, firstIndex, secondIndex) + } + + if val != nil { + seenVals[valIdx] = idx + + // Validate signature. + voteSignBytes := commit.VoteSignBytes(chainID, idx) + if !val.PubKey.VerifyBytes(voteSignBytes, commitSig.Signature) { + return errors.Errorf("wrong signature (#%d): %X", idx, commitSig.Signature) + } + + // Good! + if blockID.Equals(commitSig.BlockID(commit.BlockID)) { + talliedVotingPower += val.VotingPower + } + // else { + // It's OK that the BlockID doesn't match. We include stray + // signatures (~votes for nil) to measure validator availability. + // } + + if talliedVotingPower > votingPowerNeeded { + return nil + } + } + } + + return ErrNotEnoughVotingPowerSigned{Got: talliedVotingPower, Needed: votingPowerNeeded} +} + +func verifyCommitBasic(commit *Commit, height int64, blockID BlockID) error { + if err := commit.ValidateBasic(); err != nil { + return err + } + if height != commit.Height { + return NewErrInvalidCommitHeight(height, commit.Height) + } + if !blockID.Equals(commit.BlockID) { + return fmt.Errorf("invalid commit -- wrong block ID: want %v, got %v", + blockID, commit.BlockID) + } + return nil +} + +//----------------- + +// IsErrNotEnoughVotingPowerSigned returns true if err is +// ErrNotEnoughVotingPowerSigned. +func IsErrNotEnoughVotingPowerSigned(err error) bool { + _, ok := errors.Cause(err).(ErrNotEnoughVotingPowerSigned) + return ok +} + +// ErrNotEnoughVotingPowerSigned is returned when not enough validators signed +// a commit. 
+type ErrNotEnoughVotingPowerSigned struct { + Got int64 + Needed int64 +} + +func (e ErrNotEnoughVotingPowerSigned) Error() string { + return fmt.Sprintf("invalid commit -- insufficient voting power: got %d, needed more than %d", e.Got, e.Needed) +} + +//---------------- + +// Iterate will run the given function over the set. +func (voters *VoterSet) Iterate(fn func(index int, val *Validator) bool) { + for i, val := range voters.Voters { + stop := fn(i, val) + if stop { + break + } + } +} + +func (voters *VoterSet) String() string { + return voters.StringIndented("") +} + +// StringIndented returns an indented string representation of VoterSet. +func (voters *VoterSet) StringIndented(indent string) string { + if voters == nil { + return "nil-VoterSet" + } + var valStrings []string + voters.Iterate(func(index int, val *Validator) bool { + valStrings = append(valStrings, val.String()) + return false + }) + return fmt.Sprintf(`VoterSet{ +%s Validators: +%s %v +%s}`, + indent, indent, strings.Join(valStrings, "\n"+indent+" "), + indent) + +} + +type candidate struct { + priority uint64 + val *Validator +} + +// for implementing the Candidate interface of the rand package +func (c *candidate) Priority() uint64 { + return c.priority +} + +func (c *candidate) LessThan(other tmrand.Candidate) bool { + o, ok := other.(*candidate) + if !ok { + panic("incompatible type") + } + return bytes.Compare(c.val.Address, o.val.Address) < 0 +} + +func (c *candidate) SetWinPoint(winPoint int64) { + if winPoint < 0 { + panic(fmt.Sprintf("VotingPower must not be negative: %d", winPoint)) + } + c.val.VotingPower = winPoint +} + +func accuracyFromElectionPrecision(precision int) float64 { + base := math.Pow10(precision) + result := (base - 1) / base + return result +} + +func SelectVoter(validators *ValidatorSet, proofHash []byte, voterParams *VoterParams) *VoterSet { + if len(proofHash) == 0 || validators.Size() <= voterParams.VoterElectionThreshold { + return ToVoterAll(validators.Validators) + } + + seed :=
hashToSeed(proofHash) + candidates := make([]tmrand.Candidate, len(validators.Validators)) + for i, val := range validators.Validators { + candidates[i] = &candidate{ + priority: uint64(val.StakingPower), + val: val.Copy(), + } + } + + minVoters := CalNumOfVoterToElect(int64(len(candidates)), float64(voterParams.MaxTolerableByzantinePercentage)/100, + accuracyFromElectionPrecision(voterParams.ElectionPrecision)) + if minVoters > math.MaxInt32 { + panic("CalNumOfVoterToElect is overflow for MaxInt32") + } + voterCount := tmmath.MaxInt(voterParams.VoterElectionThreshold, int(minVoters)) + winners := tmrand.RandomSamplingWithoutReplacement(seed, candidates, voterCount) + voters := make([]*Validator, len(winners)) + for i, winner := range winners { + voters[i] = winner.(*candidate).val + } + return WrapValidatorsToVoterSet(voters) +} + +func ToVoterAll(validators []*Validator) *VoterSet { + newVoters := make([]*Validator, len(validators)) + voterCount := 0 + for _, val := range validators { + if val.StakingPower == 0 { + // remove the validator with the staking power of 0 from the voter set + continue + } + newVoters[voterCount] = &Validator{ + Address: val.Address, + PubKey: val.PubKey, + StakingPower: val.StakingPower, + VotingPower: val.StakingPower, + ProposerPriority: val.ProposerPriority, + } + voterCount++ + } + if voterCount < len(newVoters) { + zeroRemoved := make([]*Validator, voterCount) + copy(zeroRemoved, newVoters[:voterCount]) + newVoters = zeroRemoved + } + sort.Sort(ValidatorsByAddress(newVoters)) + return WrapValidatorsToVoterSet(newVoters) +} + +func hashToSeed(hash []byte) uint64 { + for len(hash) < 8 { + hash = append(hash, byte(0)) + } + return binary.LittleEndian.Uint64(hash[:8]) +} + +// MakeRoundHash combines the VRF hash, block height, and round to create a hash value for each round. This value is +// used for random sampling of the Proposer. 
+func MakeRoundHash(proofHash []byte, height int64, round int) []byte { + b := make([]byte, 16) + binary.LittleEndian.PutUint64(b, uint64(height)) + binary.LittleEndian.PutUint64(b[8:], uint64(round)) + hash := tmhash.New() + hash.Write(proofHash) + hash.Write(b[:8]) + hash.Write(b[8:16]) + return hash.Sum(nil) +} + +// RandVoterSet returns a randomized voter set (along with its source validator set), useful for testing. +// NOTE: PrivValidators are in order. +// UNSTABLE +func RandVoterSet(numVoters int, votingPower int64) (*ValidatorSet, *VoterSet, []PrivValidator) { + valz := make([]*Validator, numVoters) + privValidators := make([]PrivValidator, numVoters) + for i := 0; i < numVoters; i++ { + val, privValidator := RandValidator(false, votingPower) + valz[i] = val + privValidators[i] = privValidator + } + vals := NewValidatorSet(valz) + sort.Sort(PrivValidatorsByAddress(privValidators)) + return vals, SelectVoter(vals, []byte{}, DefaultVoterParams()), privValidators +} + +// CalNumOfVoterToElect calculates the number of voters to elect and returns the number. +func CalNumOfVoterToElect(n int64, byzantineRatio float64, accuracy float64) int64 { + if byzantineRatio < 0 || byzantineRatio > 1 || accuracy < 0 || accuracy > 1 { + panic(fmt.Sprintf("byzantineRatio and accuracy should be the float between 0 and 1. 
Got: %f", + byzantineRatio)) + } + byzantine := int64(math.Floor(float64(n) * byzantineRatio)) + + for i := int64(1); i <= n; i++ { + q := dst.HypergeometricQtlFor(n, byzantine, i, accuracy) + if int64(q)*3 < i { + return i + } + } + + return n +} diff --git a/types/voter_set_test.go b/types/voter_set_test.go new file mode 100644 index 000000000..83ea31c11 --- /dev/null +++ b/types/voter_set_test.go @@ -0,0 +1,303 @@ +package types + +import ( + "math" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/tendermint/tendermint/crypto/vrf" + tmtime "github.com/tendermint/tendermint/types/time" +) + +func countZeroStakingPower(vals []*Validator) int { + count := 0 + for _, v := range vals { + if v.StakingPower == 0 { + count++ + } + } + return count +} + +func verifyVoterSetSame(t *testing.T, vset1, vset2 *VoterSet) { + assert.True(t, vset1.Size() == vset2.Size()) + for i, v1 := range vset1.Voters { + v2 := vset2.Voters[i] + assert.True(t, v1.Address.String() == v2.Address.String()) + assert.True(t, v1.VotingPower == v2.VotingPower) + assert.True(t, v1.StakingPower == v2.StakingPower) + } +} + +func verifyVoterSetDifferent(t *testing.T, vset1, vset2 *VoterSet) { + result := vset1.Size() != vset2.Size() + if !result { + for i, v1 := range vset1.Voters { + v2 := vset2.Voters[i] + if v1.Address.String() != v2.Address.String() || + v1.StakingPower != v2.StakingPower || + v1.VotingPower != v2.VotingPower { + result = true + break + } + } + } + assert.True(t, result) +} + +func TestSelectVoter(t *testing.T) { + valSet := randValidatorSet(30) + valSet.Validators[0].StakingPower = 0 + + zeroVals := countZeroStakingPower(valSet.Validators) + genDoc := &GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: "tendermint-test", + VoterParams: &VoterParams{10, 20, 1}, + Validators: toGenesisValidators(valSet.Validators), + } + hash := genDoc.Hash() + + // verifying determinism + voterSet1 := SelectVoter(valSet, hash, genDoc.VoterParams) + voterSet2 := 
SelectVoter(valSet, hash, genDoc.VoterParams) + verifyVoterSetSame(t, voterSet1, voterSet2) + + // verifying randomness + hash[0] = (hash[0] & 0xFE) | (^(hash[0] & 0x01) & 0x01) // reverse 1 bit of hash + voterSet3 := SelectVoter(valSet, hash, genDoc.VoterParams) + verifyVoterSetDifferent(t, voterSet1, voterSet3) + + // verifying zero-staking removed + assert.True(t, countZeroStakingPower(voterSet1.Voters) == 0) + + // case that all validators are voters + voterSet := SelectVoter(valSet, hash, &VoterParams{30, 1, 1}) + assert.True(t, voterSet.Size() == 30-zeroVals) + voterSet = SelectVoter(valSet, nil, genDoc.VoterParams) + assert.True(t, voterSet.Size() == 30-zeroVals) + + // test VoterElectionThreshold + for i := 1; i < 100; i++ { + voterSet := SelectVoter(valSet, hash, &VoterParams{15, i, 1}) + assert.True(t, voterSet.Size() >= 15) + } +} + +func TestToVoterAll(t *testing.T) { + valSet := randValidatorSet(30) + vals := valSet.Validators + vals[0].StakingPower = 0 + vals[5].StakingPower = 0 + vals[28].StakingPower = 0 + zeroRemovedVoters := ToVoterAll(vals) + assert.True(t, zeroRemovedVoters.Size() == 27) + + valSet = randValidatorSet(3) + vals = valSet.Validators + vals[0].StakingPower = 0 + vals[1].StakingPower = 0 + vals[2].StakingPower = 0 + zeroRemovedVoters = ToVoterAll(vals) + assert.True(t, zeroRemovedVoters.Size() == 0) +} + +func toGenesisValidators(vals []*Validator) []GenesisValidator { + genVals := make([]GenesisValidator, len(vals)) + for i, val := range vals { + genVals[i] = GenesisValidator{Address: val.Address, PubKey: val.PubKey, Power: val.StakingPower, Name: "name"} + } + return genVals +} + +/** +The result when we set LoopCount to 10000 + << min power=100, max power=100, actual average voters=10, max voters=10 >> largest gap: 0.040000 + << min power=100, max power=100, actual average voters=20, max voters=20 >> largest gap: 0.030000 + << min power=100, max power=100, actual average voters=29, max voters=29 >> largest gap: 0.010000 + << min 
power=100, max power=10000, actual average voters=10, max voters=10 >> largest gap: 0.183673 + << min power=100, max power=10000, actual average voters=20, max voters=20 >> largest gap: 0.128788 + << min power=100, max power=10000, actual average voters=28, max voters=29 >> largest gap: 0.304348 + << min power=100, max power=1000000, actual average voters=10, max voters=10 >> largest gap: 0.093158 + << min power=100, max power=1000000, actual average voters=20, max voters=20 >> largest gap: 0.094404 + << min power=100, max power=1000000, actual average voters=28, max voters=29 >> largest gap: 0.194133 + << min power=100, max power=100000000, actual average voters=10, max voters=10 >> largest gap: 0.076536 + << min power=100, max power=100000000, actual average voters=20, max voters=20 >> largest gap: 0.076547 + << min power=100, max power=100000000, actual average voters=29, max voters=29 >> largest gap: 0.147867 +*/ +func TestSelectVoterReasonableStakingPower(t *testing.T) { + // Raise LoopCount to get smaller gap over 10000. 
But large LoopCount takes a lot of time + const LoopCount = 100 + for minMaxRate := 1; minMaxRate <= 1000000; minMaxRate *= 100 { + findLargestStakingPowerGap(t, LoopCount, minMaxRate, 10) + findLargestStakingPowerGap(t, LoopCount, minMaxRate, 20) + findLargestStakingPowerGap(t, LoopCount, minMaxRate, 29) + } +} + +func findLargestStakingPowerGap(t *testing.T, loopCount int, minMaxRate int, maxVoters int) { + valSet, privMap := randValidatorSetWithMinMax(30, 100, 100*int64(minMaxRate)) + genDoc := &GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: "tendermint-test", + VoterParams: DefaultVoterParams(), + Validators: toGenesisValidators(valSet.Validators), + } + hash := genDoc.Hash() + accumulation := make(map[string]int64) + totalVoters := 0 + for i := 0; i < loopCount; i++ { + voterSet := SelectVoter(valSet, hash, genDoc.VoterParams) + for _, voter := range voterSet.Voters { + accumulation[voter.Address.String()] += voter.StakingPower + } + proposer := valSet.SelectProposer(hash, int64(i), 0) + message := MakeRoundHash(hash, int64(i), 0) + proof, _ := privMap[proposer.Address.String()].GenerateVRFProof(message) + hash, _ = vrf.ProofToHash(proof) + totalVoters += voterSet.Size() + } + largestGap := float64(0) + for _, val := range valSet.Validators { + acc := accumulation[val.Address.String()] / int64(loopCount) + if math.Abs(float64(val.StakingPower-acc))/float64(val.StakingPower) > largestGap { + largestGap = math.Abs(float64(val.StakingPower-acc)) / float64(val.StakingPower) + } + } + t.Logf("<< min power=100, max power=%d, actual average voters=%d, max voters=%d >> largest gap: %f", + 100*minMaxRate, totalVoters/loopCount, maxVoters, largestGap) +} + +/** + This test is a test to see the difference between MaxVoters and the actual number of elected voters. + This test is to identify the minimum MaxVoters that cannot be selected as much as MaxVoters by fixing + MaxSamplingLoopTry. 
+ If MaxSamplingLoopTry is very large then actual elected voters is up to MaxVoters, + but large MaxSamplingLoopTry takes too much time. +*/ +func TestSelectVoterMaxVarious(t *testing.T) { + hash := 0 + for minMaxRate := 1; minMaxRate <= 100000000; minMaxRate *= 10000 { + t.Logf("<<< min: 100, max: %d >>>", 100*minMaxRate) + for validators := 16; validators <= 256; validators *= 4 { + for voters := 1; voters <= validators; voters += 10 { + valSet, _ := randValidatorSetWithMinMax(validators, 100, 100*int64(minMaxRate)) + voterSet := SelectVoter(valSet, []byte{byte(hash)}, &VoterParams{voters, 20, 5}) + if voterSet.Size() < voters { + t.Logf("Cannot elect voters up to MaxVoters: validators=%d, MaxVoters=%d, actual voters=%d", + validators, voters, voterSet.Size()) + break + } + hash++ + } + } + } +} + +func TestCalVotersNum(t *testing.T) { + total := int64(200) + byzantine := 0.2 + accuracy := 0.99999 + selection := CalNumOfVoterToElect(total, byzantine, accuracy) + assert.Equal(t, selection, int64(88)) + + total = int64(100) + selection = CalNumOfVoterToElect(total, byzantine, accuracy) + assert.Equal(t, selection, int64(58)) + + assert.Panics(t, func() { CalNumOfVoterToElect(total, 0.3, 10) }) + assert.Panics(t, func() { CalNumOfVoterToElect(total, 1.1, 0.9999) }) +} + +func makeByzantine(valSet *ValidatorSet, rate float64) map[string]bool { + result := make(map[string]bool) + byzantinePower := int64(0) + threshold := int64(float64(valSet.TotalStakingPower()) * rate) + for _, v := range valSet.Validators { + if byzantinePower+v.StakingPower > threshold { + break + } + result[v.Address.String()] = true + byzantinePower += v.StakingPower + } + return result +} + +func byzantinesPower(voters []*Validator, byzantines map[string]bool) int64 { + power := int64(0) + for _, v := range voters { + if byzantines[v.Address.String()] { + power += v.VotingPower + } + } + return power +} + +func countByzantines(voters []*Validator, byzantines map[string]bool) int { + count := 0 + 
for _, v := range voters { + if byzantines[v.Address.String()] { + count++ + } + } + return count +} + +func electVotersForLoop(t *testing.T, hash []byte, valSet *ValidatorSet, privMap map[string]PrivValidator, + byzantines map[string]bool, loopCount int, byzantinePercent, accuracy int) { + byzantineFault := 0 + totalVoters := 0 + totalByzantines := 0 + for i := 0; i < loopCount; i++ { + voterSet := SelectVoter(valSet, hash, &VoterParams{1, byzantinePercent, accuracy}) + byzantineThreshold := int64(float64(voterSet.TotalVotingPower())*0.33) + 1 + if byzantinesPower(voterSet.Voters, byzantines) >= byzantineThreshold { + byzantineFault++ + } + totalVoters += voterSet.Size() + totalByzantines += countByzantines(voterSet.Voters, byzantines) + proposer := valSet.SelectProposer(hash, int64(i), 0) + message := MakeRoundHash(hash, int64(i), 0) + proof, _ := privMap[proposer.Address.String()].GenerateVRFProof(message) + hash, _ = vrf.ProofToHash(proof) + } + t.Logf("[accuracy=%f] voters=%d, fault=%d, avg byzantines=%f", accuracyFromElectionPrecision(accuracy), + totalVoters/loopCount, byzantineFault, float64(totalByzantines)/float64(loopCount)) + assert.True(t, float64(byzantineFault) < float64(loopCount)*(1.0-accuracyFromElectionPrecision(accuracy))) +} + +func TestCalVotersNum2(t *testing.T) { + valSet, privMap := randValidatorSetWithMinMax(100, 100, 10000) + byzantinePercent := 20 + byzantines := makeByzantine(valSet, float64(byzantinePercent)/100) + genDoc := &GenesisDoc{ + GenesisTime: tmtime.Now(), + ChainID: "tendermint-test", + Validators: toGenesisValidators(valSet.Validators), + } + hash := genDoc.Hash() + + loopCount := 1000 + electVotersForLoop(t, hash, valSet, privMap, byzantines, loopCount, byzantinePercent, 1) + electVotersForLoop(t, hash, valSet, privMap, byzantines, loopCount, byzantinePercent, 2) + electVotersForLoop(t, hash, valSet, privMap, byzantines, loopCount, byzantinePercent, 3) + electVotersForLoop(t, hash, valSet, privMap, byzantines, loopCount, 
byzantinePercent, 4) + electVotersForLoop(t, hash, valSet, privMap, byzantines, loopCount, byzantinePercent, 5) +} + +func TestAccuracyFromElectionPrecision(t *testing.T) { + assert.True(t, accuracyFromElectionPrecision(2) == 0.99) + assert.True(t, accuracyFromElectionPrecision(3) == 0.999) + assert.True(t, accuracyFromElectionPrecision(4) == 0.9999) + assert.True(t, accuracyFromElectionPrecision(5) == 0.99999) + assert.True(t, accuracyFromElectionPrecision(6) == 0.999999) + assert.True(t, accuracyFromElectionPrecision(7) == 0.9999999) + assert.True(t, accuracyFromElectionPrecision(8) == 0.99999999) + assert.True(t, accuracyFromElectionPrecision(9) == 0.999999999) + assert.True(t, accuracyFromElectionPrecision(10) == 0.9999999999) + assert.True(t, accuracyFromElectionPrecision(11) == 0.99999999999) + assert.True(t, accuracyFromElectionPrecision(12) == 0.999999999999) + assert.True(t, accuracyFromElectionPrecision(13) == 0.9999999999999) + assert.True(t, accuracyFromElectionPrecision(14) == 0.99999999999999) + assert.True(t, accuracyFromElectionPrecision(15) == 0.999999999999999) +} diff --git a/version/version.go b/version/version.go index 20480e4f3..04c9fcc79 100644 --- a/version/version.go +++ b/version/version.go @@ -20,13 +20,14 @@ const ( // Must be a string because scripts like dist.sh read this file. // XXX: Don't change the name of this variable or you will break // automation :) - TMCoreSemVer = "0.33.3" + + TMCoreSemVer = "0.33.4" // LINECoreSemVer is the current version of LINE Tendermint Core. - LINECoreSemVer = "0.1" + LINECoreSemVer = "0.2" // ABCISemVer is the semantic version of the ABCI library - ABCISemVer = "0.16.1" + ABCISemVer = "0.16.2" ABCIVersion = ABCISemVer )