diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5cfc869fc6..fb661b37e4 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,3 +1,7 @@
+# Disclaimer:
+# * Unless otherwise specified, assume `resource_class` and `parallelism` values were cherry-picked to provide a reasonable build-duration-to-cost tradeoff at the time of choosing.
+# * There are too many variables (architecture types, CircleCI concurrency limitations, parallel pipeline runs, source code changes) to be confident we've found a best-fit configuration.
+
version: 2.1
orbs:
@@ -38,19 +42,19 @@ executors:
resource_class: arm.large
mac_amd64_medium:
macos:
- xcode: 13.4.1
+ xcode: 13.2.1
resource_class: medium
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_amd64_large:
macos:
- xcode: 13.4.1
+ xcode: 13.2.1
resource_class: large
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
mac_arm64: &executor-mac-arm64
machine: true
- resource_class: algorand/macstadium-m1
+ resource_class: algorand/macstadium-m1-macos11
environment:
HOMEBREW_NO_AUTO_UPDATE: "true"
# these are required b/c jobs explicitly assign sizes to the executors
@@ -64,19 +68,6 @@ workflows:
version: 2
"circleci_build_and_test":
jobs:
- - codegen_verification
-
- - build:
- name: << matrix.platform >>_build
- matrix: &matrix-default
- parameters:
- platform: ["amd64", "arm64", "mac_amd64"]
- filters: &filters-default
- branches:
- ignore:
- - /rel\/.*/
- - << pipeline.parameters.valid_nightly_branch >>
-
- build_nightly:
name: << matrix.platform >>_build_nightly
matrix: &matrix-nightly
@@ -91,10 +82,14 @@ workflows:
- test:
name: << matrix.platform >>_test
- matrix:
- <<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ matrix: &matrix-default
+ parameters:
+ platform: ["amd64", "arm64"]
+ filters: &filters-default
+ branches:
+ ignore:
+ - /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
- test_nightly:
name: << matrix.platform >>_test_nightly
@@ -108,8 +103,8 @@ workflows:
name: << matrix.platform >>_integration
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- integration_nightly:
name: << matrix.platform >>_integration_nightly
@@ -123,8 +118,8 @@ workflows:
name: << matrix.platform >>_e2e_expect
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- e2e_expect_nightly:
name: << matrix.platform >>_e2e_expect_nightly
@@ -138,8 +133,8 @@ workflows:
name: << matrix.platform >>_e2e_subs
matrix:
<<: *matrix-default
- requires:
- - << matrix.platform >>_build
+ filters:
+ <<: *filters-default
- e2e_subs_nightly:
name: << matrix.platform >>_e2e_subs_nightly
@@ -155,7 +150,7 @@ workflows:
name: << matrix.platform >>_<< matrix.job_type >>_verification
matrix:
parameters:
- platform: ["amd64", "arm64", "mac_amd64"]
+ platform: ["amd64", "arm64"]
job_type: ["test", "integration", "e2e_expect"]
requires:
- << matrix.platform >>_<< matrix.job_type >>
@@ -179,11 +174,11 @@ workflows:
- << matrix.platform >>_integration_nightly_verification
- << matrix.platform >>_e2e_expect_nightly_verification
- << matrix.platform >>_e2e_subs_nightly
- - codegen_verification
filters:
branches:
only:
- /rel\/.*/
+ - << pipeline.parameters.valid_nightly_branch >>
context:
- slack-secrets
- aws-secrets
@@ -219,7 +214,7 @@ commands:
shell: bash.exe
command: |
choco install -y msys2 pacman make wget --force
- choco install -y golang --version=1.17.9 --force
+ choco install -y golang --version=1.17.13 --force
choco install -y python3 --version=3.7.3 --force
export msys2='cmd //C RefreshEnv.cmd '
export msys2+='& set MSYS=winsymlinks:nativestrict '
@@ -229,21 +224,22 @@ commands:
$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-toolchain mingw-w64-x86_64-libtool unzip autoconf automake
generic_build:
- description: Run basic build and store in workspace for re-use by different architectures
+ description: >
+ Run basic build.
+
+ If command execution time increases _appreciably_, revisit CI topology:
+ * Historically, the command executes _quickly_ (< 3m with resource class >= medium).
+      * Consequently, it's faster to embed the command in a combined build + test workflow than in independent build and test workflows.
parameters:
build_dir:
type: string
default: << pipeline.parameters.build_dir >>
steps:
+ - prepare_build_dir
+ - checkout
+ - prepare_go
- restore_libsodium
- - restore_cache:
- keys:
- - 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
- - restore_cache:
- keys:
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
- - 'go-cache-v3-{{ arch }}-'
+ - restore_go_caches
- run:
name: scripts/travis/build.sh --make_debug
command: |
@@ -255,21 +251,41 @@ commands:
export GIMME_VERSION_PREFIX=<< parameters.build_dir >>/.gimme/versions
scripts/travis/build.sh --make_debug
- cache_libsodium
+ - save_go_caches
+
+ save_go_caches:
+ description: Cache Go source and build caches
+ parameters:
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
+ steps:
- save_cache:
- key: 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}'
+ name: Saving Go mod source cache
+ key: go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }}
paths:
- << parameters.build_dir >>/go/pkg/mod
- save_cache:
- key: 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
+ name: Saving Go build cache
+ key: go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }}
paths:
- tmp/go-cache
- - persist_to_workspace:
- root: << parameters.build_dir >>
- paths:
- - project
- - go
- - gimme
- - .gimme
+
+ restore_go_caches:
+ description: Restore Go source and build caches
+ steps:
+ - restore_cache:
+ name: Restoring Go mod source cache
+ keys:
+ - go-mod-v5-{{ .Branch }}-{{ checksum "go.sum" }}
+ - go-mod-v5-{{ .Branch }}-
+ - go-mod-v5-master-
+ - restore_cache:
+ name: Restoring Go build cache
+ keys:
+ - go-cache-v5-{{ arch }}-{{ .Branch }}-{{ checksum "go.sum" }}
+ - go-cache-v5-{{ arch }}-{{ .Branch }}-
+ - go-cache-v5-{{ arch }}-master-
cache_libsodium:
description: Cache libsodium for build
@@ -280,7 +296,8 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- save_cache:
- key: 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
+ name: Save cached libsodium build
+ key: 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
paths:
- crypto/libs
@@ -293,8 +310,9 @@ commands:
mkdir -p tmp
find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5
- restore_cache:
+ name: Restore cached libsodium build
keys:
- - 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
+ - 'libsodium-fork-v4-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}'
generic_test:
     description: Run build tests from build workspace, for re-use by different architectures
@@ -316,17 +334,10 @@ commands:
type: string
default: << pipeline.parameters.result_path >>
steps:
- - attach_workspace:
- at: << parameters.build_dir >>
- run: |
mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/testresults.json
- - restore_cache:
- keys:
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}'
- - 'go-cache-v3-{{ arch }}-{{ .Branch }}-'
- - 'go-cache-v3-{{ arch }}-'
- run:
name: Run build tests
no_output_timeout: << parameters.no_output_timeout >>
@@ -388,8 +399,6 @@ commands:
type: string
default: << pipeline.parameters.result_path >>
steps:
- - attach_workspace:
- at: << parameters.build_dir >>
- run: |
mkdir -p << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}
touch << parameters.result_path >>/<< parameters.result_subdir >>/${CIRCLE_NODE_INDEX}/results.xml
@@ -443,9 +452,21 @@ commands:
at: << parameters.result_path >>
- run:
name: Check if all tests were run
+ # Add to --ignored-tests when a test should _not_ be considered.
+ # * For example, E2E expect test runners (e.g. `TestAlgodWithExpect`)
+ # produce partitioned subtests.
+ # * The parent tests are deliberately _not_ partitioned. By ignoring
+ # these tests, `check_tests.py` won't provide conflicting advice to
+ # partition the parent tests.
command: |
cat << parameters.result_path >>/<< parameters.result_subdir >>/**/testresults.json > << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json
- python3 scripts/buildtools/check_tests.py << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json
+ python3 scripts/buildtools/check_tests.py \
+ --tests-results-filepath << parameters.result_path >>/<< parameters.result_subdir >>/combined_testresults.json \
+ --ignored-tests \
+ TestAlgodWithExpect \
+ TestAlgohWithExpect \
+ TestGoalWithExpect \
+ TestTealdbgWithExpect
- store_artifacts:
path: << parameters.result_path >>/<< parameters.result_subdir >>
destination: << parameters.result_subdir >>/combined-test-results
@@ -483,39 +504,25 @@ commands:
scripts/travis/test_release.sh
jobs:
- codegen_verification:
- executor: amd64_medium
- steps:
- - checkout
- - prepare_go
- - run: |
- export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
- export GOPATH="/home/circleci/go"
- scripts/travis/codegen_verification.sh
-
- build:
- parameters:
- platform:
- type: string
- executor: << parameters.platform >>_medium
- working_directory: << pipeline.parameters.build_dir >>/project
- steps:
- - prepare_build_dir
- - checkout
- - prepare_go
- - generic_build
-
build_nightly:
+ description: "Persists build artifacts to workspace in order to support `upload_binaries`."
parameters:
platform:
type: string
+ build_dir:
+ type: string
+ default: << pipeline.parameters.build_dir >>
executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
steps:
- - prepare_build_dir
- - checkout
- - prepare_go
- generic_build
+ - persist_to_workspace:
+ root: << parameters.build_dir >>
+ paths:
+ - project
+ - go
+ - gimme
+ - .gimme
- slack/notify: &slack-fail-event
event: fail
template: basic_fail_1
@@ -526,10 +533,9 @@ jobs:
type: string
executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 4
+ parallelism: 32
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_test:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_test
@@ -544,15 +550,15 @@ jobs:
working_directory: << pipeline.parameters.build_dir >>/project
parallelism: 4
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_test:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_test_nightly
no_output_timeout: 45m
- upload_coverage
- - slack/notify:
- <<: *slack-fail-event
+ - slack/notify: &slack-fail-event
+ event: fail
+ template: basic_fail_1
integration:
parameters:
@@ -560,12 +566,11 @@ jobs:
type: string
executor: << parameters.platform >>_large
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 2
+ parallelism: 16
environment:
E2E_TEST_FILTER: "GO"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_integration
@@ -581,8 +586,7 @@ jobs:
environment:
E2E_TEST_FILTER: "GO"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_integration_nightly
@@ -596,12 +600,11 @@ jobs:
type: string
executor: << parameters.platform >>_medium
working_directory: << pipeline.parameters.build_dir >>/project
- parallelism: 2
+ parallelism: 10
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_expect
@@ -617,8 +620,7 @@ jobs:
environment:
E2E_TEST_FILTER: "EXPECT"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform>>_e2e_expect_nightly
@@ -635,8 +637,7 @@ jobs:
environment:
E2E_TEST_FILTER: "SCRIPTS"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_subs
@@ -655,8 +656,7 @@ jobs:
# one of the platforms in the matrix.
CI_KEEP_TEMP_PLATFORM: "amd64"
steps:
- - prepare_build_dir
- - prepare_go
+ - generic_build
- generic_integration:
platform: << parameters.platform >>
result_subdir: << parameters.platform >>_e2e_subs_nightly
@@ -674,7 +674,7 @@ jobs:
- run:
no_output_timeout: 45m
command: |
- #export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
+ # export PATH=$(echo "$PATH" | sed -e 's|:/home/circleci/\.go_workspace/bin||g' | sed -e 's|:/usr/local/go/bin||g')
export GOPATH="/home/circleci/go"
export ALGORAND_DEADLOCK=enable
export SKIP_GO_INSTALLATION=True
@@ -689,9 +689,9 @@ jobs:
resource_class: small
working_directory: << pipeline.parameters.build_dir >>/project
parameters:
- platform: # platform: ["amd64", "arm64", "mac_amd64"]
+ platform:
type: string
- job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
+ job_type:
type: string
steps:
- checkout
@@ -704,9 +704,9 @@ jobs:
resource_class: small
working_directory: << pipeline.parameters.build_dir >>/project
parameters:
- platform: # platform: ["amd64", "arm64", "mac_amd64"]
+ platform:
type: string
- job_type: # job_type: ["test", "test_nightly", "integration", "integration_nightly", "e2e_expect", "e2e_expect_nightly"]
+ job_type:
type: string
steps:
- checkout
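
Note on the `--ignored-tests` comment above: the expect runners named there are ordinary Go tests whose subtests, not the parent, opt into partitioning. A hedged sketch of that shape (the package layout and script list are hypothetical; `partitiontest.PartitionTest` is the real helper used throughout this diff):

```go
package expect

import (
	"testing"

	"github.com/algorand/go-algorand/test/partitiontest"
)

// expectScripts stands in for the discovered .exp files; the real runner
// scans a directory, so this list is purely illustrative.
var expectScripts = []string{"basicGoalTest.exp", "catchpointCatchupTest.exp"}

// TestGoalWithExpect mirrors the parent/subtest split described above: the
// parent is deliberately not partitioned (hence ignored by check_tests.py),
// while each subtest calls PartitionTest and is distributed across CI nodes.
func TestGoalWithExpect(t *testing.T) {
	for _, script := range expectScripts {
		script := script // capture the loop variable for the closure
		t.Run(script, func(t *testing.T) {
			partitiontest.PartitionTest(t) // subtests are partitioned
			// ... run the expect script here ...
		})
	}
}
```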
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index 825056b1e1..58b4627236 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -1,8 +1,11 @@
-name: "Build workflow"
+name: "Build Windows"
on:
+ push:
+ branches:
+ - master
pull_request:
jobs:
- build-test-windows:
+ build-windows:
runs-on: windows-2022
defaults:
run:
@@ -14,14 +17,20 @@ jobs:
update: true
path-type: inherit
- name: Check out code
- uses: actions/checkout@v2
+ uses: actions/checkout@v3
with:
fetch-depth: 0
- name: Install golang
- uses: actions/setup-go@v2
+ uses: actions/setup-go@v3
with:
- go-version: '1.17.9'
- - name: Build Test
+ go-version: "1.17.13"
+ - name: Restore libsodium from cache
+ id: cache-libsodium
+ uses: actions/cache@v3
+ with:
+ path: crypto/libs
+ key: libsodium-fork-v2-${{ runner.os }}-${{ hashFiles('crypto/libsodium-fork/**') }}
+ - name: Build
run: |
export ALGORAND_DEADLOCK=enable
export SKIP_GO_INSTALLATION=True
diff --git a/.github/workflows/codegen_verification.yml b/.github/workflows/codegen_verification.yml
new file mode 100644
index 0000000000..cdeed288b9
--- /dev/null
+++ b/.github/workflows/codegen_verification.yml
@@ -0,0 +1,22 @@
+name: "codegen verification"
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+jobs:
+ codegen_verification:
+ runs-on: ubuntu-20.04
+ steps:
+ - name: Check out code
+ uses: actions/checkout@v3
+ with:
+ fetch-depth: 0
+ path: go-algorand
+ - name: Uninstall existing go installation
+ run: sudo apt-get -y -q purge golang-go
+ - name: Run codegen_verification.sh
+ run: |
+ export GOPATH="${GITHUB_WORKSPACE}/go"
+ cd go-algorand
+ scripts/travis/codegen_verification.sh
diff --git a/.github/workflows/reviewdog.yml b/.github/workflows/reviewdog.yml
index 6cc82a6e55..736b8a6bcf 100644
--- a/.github/workflows/reviewdog.yml
+++ b/.github/workflows/reviewdog.yml
@@ -1,5 +1,8 @@
name: "ReviewDog workflow"
on:
+ push:
+ branches:
+ - master
pull_request:
jobs:
# Blocking Errors Section
@@ -18,12 +21,13 @@ jobs:
- name: reviewdog-golangci-lint
uses: reviewdog/action-golangci-lint@v2
with:
- golangci_lint_version: "v1.41.1"
+ golangci_lint_version: "v1.47.3"
golangci_lint_flags: "-c .golangci.yml --allow-parallel-runners"
- reporter: "github-pr-review"
+ reporter: "github-pr-check"
tool_name: "Lint Errors"
level: "error"
fail_on_error: true
+ filter_mode: "nofilter"
# Non-Blocking Warnings Section
reviewdog-warnings:
runs-on: ubuntu-latest
@@ -44,7 +48,7 @@ jobs:
- name: Install specific golang
uses: actions/setup-go@v2
with:
- go-version: '1.17.9'
+ go-version: '1.17.13'
- name: Create folders for golangci-lint
run: mkdir -p cicdtmp/golangci-lint
- name: Check if custom golangci-lint is already built
@@ -59,7 +63,7 @@ jobs:
run: |
cd cicdtmp/golangci-lint
git clone https://github.com/golangci/golangci-lint.git .
- git checkout tags/v1.41.1
+ git checkout tags/v1.47.3
CGO_ENABLED=true go build -trimpath -o golangci-lint-cgo ./cmd/golangci-lint
./golangci-lint-cgo --version
cd ../../
diff --git a/.golangci-warnings.yml b/.golangci-warnings.yml
index c0d9e1e387..f0f2eee48e 100644
--- a/.golangci-warnings.yml
+++ b/.golangci-warnings.yml
@@ -5,14 +5,12 @@ run:
linters:
disable-all: true
enable:
- - staticcheck
+ - deadcode
+ - partitiontest
- structcheck
- typecheck
- varcheck
- - deadcode
- - gosimple
- unused
- - partitiontest
linters-settings:
@@ -41,8 +39,6 @@ issues:
exclude:
# ignore govet false positive fixed in https://github.com/golang/go/issues/45043
- "sigchanyzer: misuse of unbuffered os.Signal channel as argument to signal.Notify"
- # ignore golint false positive fixed in https://github.com/golang/lint/pull/487
- - "exported method (.*).Unwrap` should have comment or be unexported"
# ignore issues about the way we use _struct fields to define encoding settings
- "`_struct` is unused"
diff --git a/.golangci.yml b/.golangci.yml
index 9cf49999f3..271c682e50 100644
--- a/.golangci.yml
+++ b/.golangci.yml
@@ -1,23 +1,44 @@
run:
timeout: 5m
- tests: false
+ tests: true
linters:
+ # default: deadcode, errcheck, gosimple, govet, ineffassign, staticcheck, typecheck, unused, varcheck
disable-all: true
enable:
- errcheck
- gofmt
- - golint
+ - gosimple
- govet
- ineffassign
- misspell
+ - nolintlint
+ - revive
+ - staticcheck
+ - typecheck
severity:
default-severity: error
+linters-settings:
+ nolintlint:
+ # require naming a specific linter X using //nolint:X
+ require-specific: true
+ # require comments like "//nolint:errcheck // Explanation of why we are ignoring linter here..."
+ require-explanation: true
+ errcheck:
+ exclude-functions:
+      # data/transactions/logic/assembler.go uses ops.error / ops.warn to append log messages: OK to ignore for this case
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).errorf
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).error
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warnf
+ - (*github.com/algorand/go-algorand/data/transactions/logic.OpStream).warn
+
issues:
- # use these new lint checks on code since #2574
- new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57
+ # Work our way back over time to be clean against all these
+ # checkers. If you'd like to contribute, raise the number after ~,
+ # run the linter and dig in.
+ new-from-rev: eb019291beed556ec6ac1ceb4a15114ce4df0c57~25
# Disable default exclude rules listed in `golangci-lint run --help` (selectively re-enable some below)
exclude-use-default: false
@@ -41,14 +62,28 @@ issues:
- Error return value of .((os\.)?std(out|err)\..*|.*Close|.*Flush|os\.Remove(All)?|.*print(f|ln)?|os\.(Un)?Setenv). is not checked
# "EXC0005 staticcheck: Developers tend to write in C-style with an explicit 'break' in a 'switch', so it's ok to ignore"
- ineffective break statement. Did you mean to break out of the outer loop
+ # revive: irrelevant error about naming
+ - "var-naming: don't use leading k in Go names"
exclude-rules:
+ - path: _test\.go
+ linters:
+ - errcheck
+ - gofmt
+ - gosimple
+ - govet
+ - ineffassign
+ - misspell
+ - nolintlint
+ # - revive
+ - staticcheck
+ - typecheck
# Add all linters here -- Comment this block out for testing linters
- path: test/linttest/lintissues\.go
linters:
- errcheck
- gofmt
- - golint
+ - revive
- govet
- ineffassign
- misspell
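
The `nolintlint` settings above mean an inline suppression must name the linter and carry an explanation. A hedged sketch of a compliant directive (the helper function is invented for illustration):

```go
package example

import "io"

// closeQuietly shows the //nolint shape that require-specific and
// require-explanation demand; the helper itself is not from this diff.
func closeQuietly(c io.Closer) {
	c.Close() //nolint:errcheck // best-effort close during shutdown; the error is not actionable here
}
```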
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1d691afed0..517fb17fa8 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -45,7 +45,7 @@ Again, if you have a patch for a critical security vulnerability, please use our
For Go code we use the [Golang guidelines defined here](https://golang.org/doc/effective_go.html).
* Code must adhere to the official Go formatting guidelines (i.e. uses gofmt).
-* We use **gofmt** and **golint**. Also make sure to run `make sanity` and `make generate` before opening a pull request.
+* We use **gofmt** and **golangci-lint**. Also make sure to run `make sanity` and `make generate` before opening a pull request.
* Code must be documented adhering to the official Go commentary guidelines.
For JavaScript code we use the [MDN formatting rules](https://developer.mozilla.org/en-US/docs/MDN/Contribute/Guidelines/Code_guidelines/JavaScript).
diff --git a/Makefile b/Makefile
index 0860d71f7d..a7613fbf42 100644
--- a/Makefile
+++ b/Makefile
@@ -8,7 +8,6 @@ else
export GOPATH := $(shell go env GOPATH)
GOPATH1 := $(firstword $(subst :, ,$(GOPATH)))
endif
-export GOPROXY := direct
SRCPATH := $(shell pwd)
ARCH := $(shell ./scripts/archtype.sh)
OS_TYPE := $(shell ./scripts/ostype.sh)
@@ -100,15 +99,12 @@ fix: build
$(GOPATH1)/bin/algofix */
lint: deps
- $(GOPATH1)/bin/golint ./...
-
-vet:
- go vet ./...
+ $(GOPATH1)/bin/golangci-lint run -c .golangci.yml
check_shell:
find . -type f -name "*.sh" -exec shellcheck {} +
-sanity: vet fix lint fmt
+sanity: fix lint fmt
cover:
go test $(GOTAGS) -coverprofile=cover.out $(UNIT_TEST_SOURCES)
@@ -331,7 +327,7 @@ dump: $(addprefix gen/,$(addsuffix /genesis.dump, $(NETWORKS)))
install: build
scripts/dev_install.sh -p $(GOPATH1)/bin
-.PHONY: default fmt vet lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_swagger
+.PHONY: default fmt lint check_shell sanity cover prof deps build test fulltest shorttest clean cleango deploy node_exporter install %gen gen NONGO_BIN check-go-version rebuild_swagger
###### TARGETS FOR CICD PROCESS ######
include ./scripts/release/mule/Makefile.mule
diff --git a/agreement/fuzzer/tests_test.go b/agreement/fuzzer/tests_test.go
index 75a0cc67cf..2dfc837072 100644
--- a/agreement/fuzzer/tests_test.go
+++ b/agreement/fuzzer/tests_test.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"flag"
"fmt"
- "io/ioutil"
"log"
"math"
"math/rand"
@@ -440,7 +439,7 @@ func TestFuzzer(t *testing.T) {
t.Run(testName, func(t *testing.T) {
 			partitiontest.PartitionTest(t) // Check if this expect test should be run, may SKIP
jsonFilename := jsonFiles[testName]
- jsonBytes, err := ioutil.ReadFile(jsonFilename)
+ jsonBytes, err := os.ReadFile(jsonFilename)
require.NoError(t, err)
var fuzzerTest FuzzerTestFile
err = json.Unmarshal(jsonBytes, &fuzzerTest)
diff --git a/buildnumber.dat b/buildnumber.dat
index b8626c4cff..573541ac97 100644
--- a/buildnumber.dat
+++ b/buildnumber.dat
@@ -1 +1 @@
-4
+0
diff --git a/catchup/catchpointService.go b/catchup/catchpointService.go
index 5e86404e2e..c55b3ea8d1 100644
--- a/catchup/catchpointService.go
+++ b/catchup/catchpointService.go
@@ -41,7 +41,7 @@ const (
noPeersAvailableSleepInterval = 50 * time.Millisecond
)
-// CatchpointCatchupNodeServices defines the extenal node support needed
+// CatchpointCatchupNodeServices defines the external node support needed
// for the catchpoint service to switch the node between "regular" operational mode and catchup mode.
type CatchpointCatchupNodeServices interface {
SetCatchpointCatchupMode(bool) (newContextCh <-chan context.Context)
@@ -65,10 +65,10 @@ type CatchpointCatchupStats struct {
type CatchpointCatchupService struct {
// stats is the statistics object, updated async while downloading the ledger
stats CatchpointCatchupStats
- // statsMu syncronizes access to stats, as we could attempt to update it while querying for it's current state
+	// statsMu synchronizes access to stats, as we could attempt to update it while querying for its current state
statsMu deadlock.Mutex
node CatchpointCatchupNodeServices
- // ctx is the node cancelation context, used when the node is being stopped.
+ // ctx is the node cancellation context, used when the node is being stopped.
ctx context.Context
cancelCtxFunc context.CancelFunc
// running is a waitgroup counting the running goroutine(1), and allow us to exit cleanly.
@@ -79,17 +79,17 @@ type CatchpointCatchupService struct {
stage ledger.CatchpointCatchupState
// log is the logger object
log logging.Logger
- // newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously perfoming catchup
+ // newService indicates whether this service was created after the node was running ( i.e. true ) or the node just started to find that it was previously performing catchup
newService bool
- // net is the underlaying network module
+ // net is the underlying network module
net network.GossipNode
// ledger points to the ledger object
- ledger *ledger.Ledger
+ ledger ledger.CatchupAccessorClientLedger
// lastBlockHeader is the latest block we have before going into catchpoint catchup mode. We use it to serve the node status requests instead of going to the ledger.
lastBlockHeader bookkeeping.BlockHeader
// config is a copy of the node configuration
config config.Local
- // abortCtx used as a syncronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decided to abort
+	// abortCtx is used as a synchronized flag to let us know when the user asked us to abort the catchpoint catchup process. note that it's not being used when we decide to abort
// the catchup due to an internal issue ( such as exceeding number of retries )
abortCtx context.Context
abortCtxFunc context.CancelFunc
@@ -98,19 +98,20 @@ type CatchpointCatchupService struct {
}
// MakeResumedCatchpointCatchupService creates a catchpoint catchup service for a node that is already in catchpoint catchup mode
-func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
+func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, accessor ledger.CatchpointCatchupAccessor, cfg config.Local) (service *CatchpointCatchupService, err error) {
service = &CatchpointCatchupService{
stats: CatchpointCatchupStats{
StartTime: time.Now(),
},
node: node,
- ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
+ ledgerAccessor: accessor,
log: log,
newService: false,
net: net,
- ledger: l,
+ ledger: accessor.Ledger(),
config: cfg,
}
+ l := accessor.Ledger()
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
@@ -124,7 +125,7 @@ func MakeResumedCatchpointCatchupService(ctx context.Context, node CatchpointCat
}
// MakeNewCatchpointCatchupService creates a new catchpoint catchup service for a node that is not in catchpoint catchup mode
-func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, l *ledger.Ledger, cfg config.Local) (service *CatchpointCatchupService, err error) {
+func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNodeServices, log logging.Logger, net network.GossipNode, accessor ledger.CatchpointCatchupAccessor, cfg config.Local) (service *CatchpointCatchupService, err error) {
if catchpoint == "" {
return nil, fmt.Errorf("MakeNewCatchpointCatchupService: catchpoint is invalid")
}
@@ -134,14 +135,15 @@ func MakeNewCatchpointCatchupService(catchpoint string, node CatchpointCatchupNo
StartTime: time.Now(),
},
node: node,
- ledgerAccessor: ledger.MakeCatchpointCatchupAccessor(l, log),
+ ledgerAccessor: accessor,
stage: ledger.CatchpointCatchupStateInactive,
log: log,
newService: true,
net: net,
- ledger: l,
+ ledger: accessor.Ledger(),
config: cfg,
}
+ l := accessor.Ledger()
service.lastBlockHeader, err = l.BlockHdr(l.Latest())
if err != nil {
return nil, err
@@ -162,7 +164,7 @@ func (cs *CatchpointCatchupService) Start(ctx context.Context) {
func (cs *CatchpointCatchupService) Abort() {
// In order to abort the catchpoint catchup process, we need to first set the flag of abortCtxFunc, and follow that by canceling the main context.
// The order of these calls is crucial : The various stages are blocked on the main context. When that one expires, it uses the abort context to determine
- // if the cancelation meaning that we want to shut down the process, or aborting the catchpoint catchup completly.
+	// if the cancellation means that we want to shut down the process, or to abort the catchpoint catchup completely.
cs.abortCtxFunc()
cs.cancelCtxFunc()
}
@@ -200,8 +202,8 @@ func (cs *CatchpointCatchupService) run() {
err = cs.processStageInactive()
case ledger.CatchpointCatchupStateLedgerDownload:
err = cs.processStageLedgerDownload()
- case ledger.CatchpointCatchupStateLastestBlockDownload:
- err = cs.processStageLastestBlockDownload()
+ case ledger.CatchpointCatchupStateLatestBlockDownload:
+ err = cs.processStageLatestBlockDownload()
case ledger.CatchpointCatchupStateBlocksDownload:
err = cs.processStageBlocksDownload()
case ledger.CatchpointCatchupStateSwitch:
@@ -258,7 +260,7 @@ func (cs *CatchpointCatchupService) processStageInactive() (err error) {
return cs.abort(fmt.Errorf("processStageInactive failed to update stage : %v", err))
}
if cs.newService {
- // we need to let the node know that it should shut down all the unneed services to avoid clashes.
+ // we need to let the node know that it should shut down all the unneeded services to avoid clashes.
cs.updateNodeCatchupMode(true)
}
return nil
@@ -272,7 +274,7 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
round, _, err0 := ledgercore.ParseCatchpointLabel(label)
if err0 != nil {
- return cs.abort(fmt.Errorf("processStageLedgerDownload failed to patse label : %v", err0))
+ return cs.abort(fmt.Errorf("processStageLedgerDownload failed to parse label : %v", err0))
}
// download balances file.
@@ -326,9 +328,9 @@ func (cs *CatchpointCatchupService) processStageLedgerDownload() (err error) {
cs.log.Warnf("unable to download ledger : %v", err)
}
- err = cs.updateStage(ledger.CatchpointCatchupStateLastestBlockDownload)
+ err = cs.updateStage(ledger.CatchpointCatchupStateLatestBlockDownload)
if err != nil {
- return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLastestBlockDownload : %v", err))
+ return cs.abort(fmt.Errorf("processStageLedgerDownload failed to update stage to CatchpointCatchupStateLatestBlockDownload : %v", err))
}
return nil
}
@@ -342,11 +344,11 @@ func (cs *CatchpointCatchupService) updateVerifiedAccounts(addedTrieHashes uint6
}
}
-// processStageLastestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verify that against the previously downloaded ledger.
-func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err error) {
+// processStageLatestBlockDownload is the third catchpoint catchup stage. It downloads the latest block and verifies it against the previously downloaded ledger.
+func (cs *CatchpointCatchupService) processStageLatestBlockDownload() (err error) {
blockRound, err := cs.ledgerAccessor.GetCatchupBlockRound(cs.ctx)
if err != nil {
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to retrieve catchup block round : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed to retrieve catchup block round : %v", err))
}
attemptsCount := 0
@@ -375,7 +377,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
// check block protocol version support.
if protoParams, ok = config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
- cs.log.Warnf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
+ cs.log.Warnf("processStageLatestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol)
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
@@ -383,24 +385,24 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: unsupported protocol version detected: '%v'", blk.BlockHeader.CurrentProtocol))
}
- // We need to compare explicitly the genesis hash since we're not doing any block validation. This would ensure the genesis.json file matches the block that we've receieved.
+ // We need to compare explicitly the genesis hash since we're not doing any block validation. This would ensure the genesis.json file matches the block that we've received.
if protoParams.SupportGenesisHash && blk.GenesisHash() != cs.ledger.GenesisHash() {
- cs.log.Warnf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())
+ cs.log.Warnf("processStageLatestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash())
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: genesis hash mismatches : genesis hash on genesis.json file is %v while genesis hash of downloaded block is %v", cs.ledger.GenesisHash(), blk.GenesisHash()))
}
// check to see that the block header and the block payset aligns
if !blk.ContentsMatchHeader() {
- cs.log.Warnf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header")
+ cs.log.Warnf("processStageLatestBlockDownload: downloaded block content does not match downloaded block header")
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
@@ -408,7 +410,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload: downloaded block content does not match downloaded block header"))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload: downloaded block content does not match downloaded block header"))
}
// verify that the catchpoint is valid.
@@ -420,15 +422,18 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
if attemptsCount <= cs.config.CatchupBlockDownloadRetryAttempts {
// try again.
blk = nil
- cs.log.Infof("processStageLastestBlockDownload: block %d verification against catchpoint failed, another attempt will be made; err = %v", blockRound, err)
+ cs.log.Infof("processStageLatestBlockDownload: block %d verification against catchpoint failed, another attempt will be made; err = %v", blockRound, err)
cs.blocksDownloadPeerSelector.rankPeer(psp, peerRankInvalidDownload)
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling VerifyCatchpoint : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling VerifyCatchpoint : %v", err))
+ }
+ if psp != nil {
+ // give a rank to the download, as the download was successful.
+			// if psp is nil, the block came from the local ledger and there is no peer to rank
+ peerRank := cs.blocksDownloadPeerSelector.peerDownloadDurationToRank(psp, blockDownloadDuration)
+ cs.blocksDownloadPeerSelector.rankPeer(psp, peerRank)
}
- // give a rank to the download, as the download was successful.
- peerRank := cs.blocksDownloadPeerSelector.peerDownloadDurationToRank(psp, blockDownloadDuration)
- cs.blocksDownloadPeerSelector.rankPeer(psp, peerRank)
err = cs.ledgerAccessor.StoreBalancesRound(cs.ctx, blk)
if err != nil {
@@ -437,7 +442,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreBalancesRound : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling StoreBalancesRound : %v", err))
}
err = cs.ledgerAccessor.StoreFirstBlock(cs.ctx, blk)
@@ -447,7 +452,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed when calling StoreFirstBlock : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed when calling StoreFirstBlock : %v", err))
}
err = cs.updateStage(ledger.CatchpointCatchupStateBlocksDownload)
@@ -457,7 +462,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
blk = nil
continue
}
- return cs.abort(fmt.Errorf("processStageLastestBlockDownload failed to update stage : %v", err))
+ return cs.abort(fmt.Errorf("processStageLatestBlockDownload failed to update stage : %v", err))
}
// great ! everything is ready for next stage.
@@ -466,7 +471,7 @@ func (cs *CatchpointCatchupService) processStageLastestBlockDownload() (err erro
return nil
}
-// lookbackForStateproofsSupport calculates the lookback (from topblock round) needed to be downloaded
+// lookbackForStateproofsSupport calculates the lookback (from topBlock round) needed to be downloaded
// in order to support state proofs verification.
func lookbackForStateproofsSupport(topBlock *bookkeeping.Block) uint64 {
proto := config.Consensus[topBlock.CurrentProtocol]
@@ -764,10 +769,10 @@ func (cs *CatchpointCatchupService) GetStatistics() (out CatchpointCatchupStats)
}
// updateBlockRetrievalStatistics updates the blocks retrieval statistics by applying the provided deltas
-func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(aquiredBlocksDelta, verifiedBlocksDelta int64) {
+func (cs *CatchpointCatchupService) updateBlockRetrievalStatistics(acquiredBlocksDelta, verifiedBlocksDelta int64) {
cs.statsMu.Lock()
defer cs.statsMu.Unlock()
- cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + aquiredBlocksDelta)
+ cs.stats.AcquiredBlocks = uint64(int64(cs.stats.AcquiredBlocks) + acquiredBlocksDelta)
cs.stats.VerifiedBlocks = uint64(int64(cs.stats.VerifiedBlocks) + verifiedBlocksDelta)
}
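
The switch above from a concrete `*ledger.Ledger` field to the `ledger.CatchupAccessorClientLedger` interface is what makes the mock-based unit test in the next file possible. A hedged restatement of the seam (method set inferred from the stub in catchpointService_test.go; the local interface name is illustrative):

```go
package catchup

import (
	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/bookkeeping"
)

// clientLedger captures the narrow dependency the service now holds: any type
// with these four methods (such as the test stub below) can stand in for a
// real ledger, so no database is needed to unit-test the catchup stages.
type clientLedger interface {
	Block(basics.Round) (bookkeeping.Block, error)
	BlockHdr(basics.Round) (bookkeeping.BlockHeader, error)
	GenesisHash() crypto.Digest
	Latest() basics.Round
}
```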
diff --git a/catchup/catchpointService_test.go b/catchup/catchpointService_test.go
new file mode 100644
index 0000000000..02f4a9b7a3
--- /dev/null
+++ b/catchup/catchpointService_test.go
@@ -0,0 +1,91 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package catchup
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/components/mocks"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/ledger"
+ "github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
+)
+
+type catchpointCatchupLedger struct {
+}
+
+func (l *catchpointCatchupLedger) Block(rnd basics.Round) (blk bookkeeping.Block, err error) {
+ blk = bookkeeping.Block{
+ BlockHeader: bookkeeping.BlockHeader{
+ UpgradeState: bookkeeping.UpgradeState{
+ CurrentProtocol: protocol.ConsensusCurrentVersion,
+ },
+ },
+ }
+ commitments, err := blk.PaysetCommit()
+ if err != nil {
+ return blk, err
+ }
+ blk.TxnCommitments = commitments
+
+ return blk, nil
+}
+
+func (l *catchpointCatchupLedger) GenesisHash() (d crypto.Digest) {
+ return
+}
+
+func (l *catchpointCatchupLedger) BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error) {
+ return
+}
+
+func (l *catchpointCatchupLedger) Latest() (rnd basics.Round) {
+ return
+}
+
+type catchpointCatchupAccessorMock struct {
+ mocks.MockCatchpointCatchupAccessor
+ l *catchpointCatchupLedger
+}
+
+func (m *catchpointCatchupAccessorMock) GetCatchupBlockRound(ctx context.Context) (round basics.Round, err error) {
+ return 1, nil
+}
+
+func (m *catchpointCatchupAccessorMock) Ledger() (l ledger.CatchupAccessorClientLedger) {
+ return m.l
+}
+
+// TestCatchpointServicePeerRank ensures CatchpointService does not crash when ranking a peer
+// after a block was fetched from the local ledger rather than from the network.
+func TestCatchpointServicePeerRank(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ l := catchpointCatchupLedger{}
+ a := catchpointCatchupAccessorMock{l: &l}
+ cs := CatchpointCatchupService{ledgerAccessor: &a, ledger: &l}
+ cs.initDownloadPeerSelector()
+
+ err := cs.processStageLatestBlockDownload()
+ require.NoError(t, err)
+}
diff --git a/catchup/ledgerFetcher.go b/catchup/ledgerFetcher.go
index afc39414dc..fa965a1543 100644
--- a/catchup/ledgerFetcher.go
+++ b/catchup/ledgerFetcher.go
@@ -36,14 +36,14 @@ import (
"github.com/algorand/go-algorand/util"
)
-var errNoLedgerForRound = errors.New("No ledger available for given round")
+var errNoLedgerForRound = errors.New("no ledger available for given round")
const (
// maxCatchpointFileChunkSize is a rough estimate for the worst-case scenario we're going to have of all the accounts data per a single catchpoint file chunk.
maxCatchpointFileChunkSize = ledger.BalancesPerCatchpointFileChunk * basics.MaxEncodedAccountDataSize
// defaultMinCatchpointFileDownloadBytesPerSecond defines the worst-case scenario download speed we expect to get while downloading a catchpoint file
defaultMinCatchpointFileDownloadBytesPerSecond = 20 * 1024
- // catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each itration from the incoming http data stream
+ // catchpointFileStreamReadSize defines the number of bytes we would attempt to read at each iteration from the incoming http data stream
catchpointFileStreamReadSize = 4096
)
@@ -114,7 +114,7 @@ func (lf *ledgerFetcher) getPeerLedger(ctx context.Context, peer network.HTTPPee
return fmt.Errorf("getPeerLedger error response status code %d", response.StatusCode)
}
- // at this point, we've already receieved the response headers. ensure that the
+ // at this point, we've already received the response headers. ensure that the
// response content type is what we'd like it to be.
contentTypes := response.Header["Content-Type"]
if len(contentTypes) != 1 {
diff --git a/catchup/ledgerFetcher_test.go b/catchup/ledgerFetcher_test.go
index 637064c97d..4cb57d7fd3 100644
--- a/catchup/ledgerFetcher_test.go
+++ b/catchup/ledgerFetcher_test.go
@@ -30,7 +30,6 @@ import (
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/ledger"
"github.com/algorand/go-algorand/logging"
- "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/test/partitiontest"
)
@@ -44,8 +43,7 @@ func TestNoPeersAvailable(t *testing.T) {
partitiontest.PartitionTest(t)
lf := makeLedgerFetcher(&mocks.MockNetwork{}, &mocks.MockCatchpointCatchupAccessor{}, logging.TestingLog(t), &dummyLedgerFetcherReporter{}, config.GetDefaultLocal())
- var peer network.Peer
- peer = &lf // The peer is an opaque interface.. we can add anything as a Peer.
+	peer := &lf // The peer is an opaque interface... we can add anything as a Peer.
err := lf.downloadLedger(context.Background(), peer, basics.Round(0))
require.Equal(t, errNonHTTPPeer, err)
}
diff --git a/catchup/networkFetcher.go b/catchup/networkFetcher.go
new file mode 100644
index 0000000000..d82395e8d3
--- /dev/null
+++ b/catchup/networkFetcher.go
@@ -0,0 +1,134 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package catchup
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "time"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
+)
+
+// NetworkFetcher is the struct used to export fetchBlock function from universalFetcher
+type NetworkFetcher struct {
+ log logging.Logger
+ cfg config.Local
+ auth BlockAuthenticator
+ peerSelector *peerSelector
+ fetcher *universalBlockFetcher
+}
+
+// MakeNetworkFetcher initializes a NetworkFetcher service
+func MakeNetworkFetcher(log logging.Logger, net network.GossipNode, cfg config.Local, auth BlockAuthenticator, pipelineFetch bool) *NetworkFetcher {
+ netFetcher := &NetworkFetcher{
+ log: log,
+ cfg: cfg,
+ auth: auth,
+ peerSelector: createPeerSelector(net, cfg, pipelineFetch),
+ fetcher: makeUniversalBlockFetcher(log, net, cfg),
+ }
+ return netFetcher
+}
+
+func (netFetcher *NetworkFetcher) getHTTPPeer() (network.HTTPPeer, *peerSelectorPeer, error) {
+ for retryCount := 0; retryCount < netFetcher.cfg.CatchupBlockDownloadRetryAttempts; retryCount++ {
+ psp, err := netFetcher.peerSelector.getNextPeer()
+ if err != nil {
+ if err != errPeerSelectorNoPeerPoolsAvailable {
+ err = fmt.Errorf("FetchBlock: unable to obtain a list of peers to download the block from : %w", err)
+ return nil, nil, err
+ }
+			// this is possible on startup, since the network package might have yet to retrieve the list of peers.
+ netFetcher.log.Infof("FetchBlock: unable to obtain a list of peers to download the block from; will retry shortly.")
+ time.Sleep(noPeersAvailableSleepInterval)
+ continue
+ }
+ peer := psp.Peer
+ httpPeer, ok := peer.(network.HTTPPeer)
+ if ok {
+ return httpPeer, psp, nil
+ }
+ netFetcher.log.Warnf("FetchBlock: non-HTTP peer was provided by the peer selector")
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ }
+ return nil, nil, errors.New("FetchBlock: recurring non-HTTP peer was provided by the peer selector")
+}
+
+// FetchBlock function given a round number returns a block from a http peer
+func (netFetcher *NetworkFetcher) FetchBlock(ctx context.Context, round basics.Round) (*bookkeeping.Block,
+ *agreement.Certificate, time.Duration, error) {
+ // internal retry attempt to fetch the block
+ for retryCount := 0; retryCount < netFetcher.cfg.CatchupBlockDownloadRetryAttempts; retryCount++ {
+ httpPeer, psp, err := netFetcher.getHTTPPeer()
+ if err != nil {
+ return nil, nil, time.Duration(0), err
+ }
+
+ blk, cert, downloadDuration, err := netFetcher.fetcher.fetchBlock(ctx, round, httpPeer)
+ if err != nil {
+ if ctx.Err() != nil {
+ // caller of the function decided to cancel the download
+ return nil, nil, time.Duration(0), err
+ }
+ netFetcher.log.Infof("FetchBlock: failed to download block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ netFetcher.peerSelector.rankPeer(psp, peerRankDownloadFailed)
+ continue // retry the fetch
+ }
+
+ // Check that the block's contents match the block header
+ if !blk.ContentsMatchHeader() && blk.Round() > 0 {
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ // Check if this mismatch is due to an unsupported protocol version
+ if _, ok := config.Consensus[blk.BlockHeader.CurrentProtocol]; !ok {
+ netFetcher.log.Errorf("FetchBlock: downloaded block(%v) unsupported protocol version detected: '%v'",
+ round, blk.BlockHeader.CurrentProtocol)
+ }
+ netFetcher.log.Warnf("FetchBlock: downloaded block(%v) contents do not match header", round)
+ netFetcher.log.Infof("FetchBlock: failed to download block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ continue // retry the fetch
+ }
+
+		// Authenticate the block. For correct execution, the caller should call FetchBlock only when the lookback block is available.
+ if netFetcher.cfg.CatchupVerifyCertificate() {
+ err = netFetcher.auth.Authenticate(blk, cert)
+ if err != nil {
+				netFetcher.log.Warnf("FetchBlock: cert authentication failed for block %d on attempt %d out of %d. %v",
+ round, retryCount+1, netFetcher.cfg.CatchupBlockDownloadRetryAttempts, err)
+ netFetcher.peerSelector.rankPeer(psp, peerRankInvalidDownload)
+ continue // retry the fetch
+ }
+ }
+
+ // upon successful download rank the peer according to the download speed
+ peerRank := netFetcher.peerSelector.peerDownloadDurationToRank(psp, downloadDuration)
+ netFetcher.peerSelector.rankPeer(psp, peerRank)
+ return blk, cert, downloadDuration, err
+
+ }
+ err := fmt.Errorf("FetchBlock failed after multiple blocks download attempts: %v unsuccessful attempts",
+ netFetcher.cfg.CatchupBlockDownloadRetryAttempts)
+ return nil, nil, time.Duration(0), err
+}
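
A minimal usage sketch for the fetcher above, mirroring the tests that follow; it assumes package `catchup` scope and an already-wired `network.GossipNode`, and sets `CatchupBlockValidateMode = 1` to skip certificate authentication exactly as those tests do:

```go
package catchup

import (
	"context"

	"github.com/algorand/go-algorand/config"
	"github.com/algorand/go-algorand/data/basics"
	"github.com/algorand/go-algorand/data/bookkeeping"
	"github.com/algorand/go-algorand/logging"
	"github.com/algorand/go-algorand/network"
)

// fetchOne is an illustrative helper (not part of this diff) showing the
// intended call pattern: construct the fetcher once, then let FetchBlock
// handle peer selection, retries, and ranking internally.
func fetchOne(ctx context.Context, net network.GossipNode, log logging.Logger, rnd basics.Round) (*bookkeeping.Block, error) {
	cfg := config.GetDefaultLocal()
	cfg.CatchupBlockValidateMode = 1 // skip cert authentication, as in the tests below
	f := MakeNetworkFetcher(log, net, cfg, nil, false)
	blk, _, _, err := f.FetchBlock(ctx, rnd)
	return blk, err
}
```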
diff --git a/catchup/networkFetcher_test.go b/catchup/networkFetcher_test.go
new file mode 100644
index 0000000000..7c6a2c885b
--- /dev/null
+++ b/catchup/networkFetcher_test.go
@@ -0,0 +1,190 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see .
+
+package catchup
+
+import (
+ "context"
+ "sync"
+ "testing"
+
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/rpcs"
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestFetchBlock(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ block, _, duration, err := fetcher.FetchBlock(context.Background(), next)
+
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+
+ block, cert, duration, err := fetcher.FetchBlock(context.Background(), next+1)
+
+	require.Error(t, err)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+ require.Nil(t, block)
+ require.Nil(t, cert)
+ require.Equal(t, int64(duration), int64(0))
+}
+
+func TestConcurrentAttemptsToFetchBlockSuccess(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, b, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ // start is used to synchronize concurrent fetchBlock attempts
+ // parallelRequests represents number of concurrent attempts
+ start := make(chan struct{})
+ parallelRequests := int(cfg.CatchupParallelBlocks)
+ var wg sync.WaitGroup
+ wg.Add(parallelRequests)
+ for i := 0; i < parallelRequests; i++ {
+ go func() {
+ <-start
+ block, _, duration, err := fetcher.FetchBlock(context.Background(), next)
+ require.NoError(t, err)
+ require.Equal(t, &b, block)
+ require.GreaterOrEqual(t, int64(duration), int64(0))
+ wg.Done()
+ }()
+ }
+ close(start)
+ wg.Wait()
+}
+
+func TestHTTPPeerNotAvailable(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ net := &httpTestPeerSource{}
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ _, _, _, err := fetcher.FetchBlock(context.Background(), 1)
+ require.Contains(t, err.Error(), "recurring non-HTTP peer was provided by the peer selector")
+}
+
+func TestFetchBlockFailed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ net := &httpTestPeerSource{}
+ wsPeer := makeTestUnicastPeer(net, t)
+ net.addPeer(wsPeer.GetAddress())
+
+ // Disable block authentication
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockValidateMode = 1
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, nil, false)
+
+ _, _, _, err := fetcher.FetchBlock(context.Background(), 1)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+}
+
+func TestFetchBlockAuthenticationFailed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ ledger, next, _, err := buildTestLedger(t, bookkeeping.Block{})
+ if err != nil {
+ t.Fatal(err)
+ return
+ }
+
+ blockServiceConfig := config.GetDefaultLocal()
+ blockServiceConfig.EnableBlockService = true
+ blockServiceConfig.EnableBlockServiceFallbackToArchiver = false
+
+ net := &httpTestPeerSource{}
+ ls := rpcs.MakeBlockService(logging.Base(), blockServiceConfig, ledger, net, "test genesisID")
+
+ node := basicRPCNode{}
+ node.RegisterHTTPHandler(rpcs.BlockServiceBlockPath, ls)
+ node.start()
+ defer node.stop()
+ rootURL := node.rootURL()
+
+ net.addPeer(rootURL)
+
+ cfg := config.GetDefaultLocal()
+ cfg.CatchupBlockDownloadRetryAttempts = 1
+
+ fetcher := MakeNetworkFetcher(logging.TestingLog(t), net, cfg, &mockedAuthenticator{errorRound: int(next)}, false)
+
+ _, _, _, err = fetcher.FetchBlock(context.Background(), next)
+ require.Contains(t, err.Error(), "FetchBlock failed after multiple blocks download attempts")
+}
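TestConcurrentAttemptsToFetchBlockSuccess relies on a start-channel barrier: every goroutine parks on `<-start`, and a single `close(start)` releases them all at once, so the FetchBlock calls genuinely overlap instead of trickling out as the goroutines are spawned. A minimal, self-contained sketch of that idiom (the worker bodies are illustrative only):

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	start := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func(id int) {
			defer wg.Done()
			<-start // park until all workers are spawned
			fmt.Println("worker", id, "running")
		}(i)
	}
	close(start) // a closed channel unblocks every receiver at once
	wg.Wait()
}
```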
diff --git a/catchup/service.go b/catchup/service.go
index adc313db64..1ebaf0fd3b 100644
--- a/catchup/service.go
+++ b/catchup/service.go
@@ -425,7 +425,7 @@ func (s *Service) pipelinedFetch(seedLookback uint64) {
close(completed)
}()
- peerSelector := s.createPeerSelector(true)
+ peerSelector := createPeerSelector(s.net, s.cfg, true)
if _, err := peerSelector.getNextPeer(); err == errPeerSelectorNoPeerPoolsAvailable {
s.log.Debugf("pipelinedFetch: was unable to obtain a peer to retrieve the block from")
@@ -653,7 +653,7 @@ func (s *Service) fetchRound(cert agreement.Certificate, verifier *agreement.Asy
}
blockHash := bookkeeping.BlockHash(cert.Proposal.BlockDigest) // semantic digest (i.e., hash of the block header), not byte-for-byte digest
- peerSelector := s.createPeerSelector(false)
+ peerSelector := createPeerSelector(s.net, s.cfg, false)
for s.ledger.LastRound() < cert.Round {
psp, getPeerErr := peerSelector.getNextPeer()
if getPeerErr != nil {
@@ -755,11 +755,11 @@ func (s *Service) handleUnsupportedRound(nextUnsupportedRound basics.Round) {
}
}
-func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
+func createPeerSelector(net network.GossipNode, cfg config.Local, pipelineFetch bool) *peerSelector {
var peerClasses []peerClass
- if s.cfg.EnableCatchupFromArchiveServers {
+ if cfg.EnableCatchupFromArchiveServers {
if pipelineFetch {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookArchivers},
@@ -774,7 +774,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
} else {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
@@ -791,7 +791,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
} else {
if pipelineFetch {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersPhonebookRelays},
@@ -804,7 +804,7 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
} else {
- if s.cfg.NetAddress != "" { // Relay node
+ if cfg.NetAddress != "" { // Relay node
peerClasses = []peerClass{
{initialRank: peerRankInitialFirstPriority, peerClass: network.PeersConnectedOut},
{initialRank: peerRankInitialSecondPriority, peerClass: network.PeersConnectedIn},
@@ -818,5 +818,5 @@ func (s *Service) createPeerSelector(pipelineFetch bool) *peerSelector {
}
}
}
- return makePeerSelector(s.net, peerClasses)
+ return makePeerSelector(net, peerClasses)
}
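The createPeerSelector change is a receiver-to-parameters refactor: the method only ever read `s.net` and `s.cfg`, so passing those explicitly makes the dependency surface visible and lets callers build a selector without constructing a full Service. A minimal sketch of the pattern, with placeholder types standing in for `network.GossipNode` and `config.Local`:

```go
package main

import "fmt"

type Config struct{ NetAddress string }

type Service struct {
	net string // placeholder for network.GossipNode
	cfg Config
}

// Free function: depends only on what it actually uses.
func classify(net string, cfg Config) string {
	if cfg.NetAddress != "" {
		return net + " (relay)"
	}
	return net + " (non-relay)"
}

// The old method shape can survive as a thin wrapper if needed.
func (s *Service) classify() string { return classify(s.net, s.cfg) }

func main() {
	fmt.Println(classify("gossip", Config{NetAddress: ":4160"})) // no Service required
}
```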
diff --git a/catchup/service_test.go b/catchup/service_test.go
index 676a283bad..f364b7a465 100644
--- a/catchup/service_test.go
+++ b/catchup/service_test.go
@@ -834,7 +834,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.NetAddress = "someAddress"
s := MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps := s.createPeerSelector(true)
+ ps := createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
@@ -850,7 +850,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
require.Equal(t, peerRankInitialSecondPriority, ps.peerClasses[1].initialRank)
@@ -864,7 +864,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 4, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -881,7 +881,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = true
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -896,7 +896,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -911,7 +911,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(true)
+ ps = createPeerSelector(s.net, s.cfg, true)
require.Equal(t, 2, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -924,7 +924,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = "someAddress"
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 3, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
@@ -939,7 +939,7 @@ func TestCreatePeerSelector(t *testing.T) {
cfg.EnableCatchupFromArchiveServers = false
cfg.NetAddress = ""
s = MakeService(logging.Base(), cfg, &httpTestPeerSource{}, new(mockedLedger), &mockedAuthenticator{errorRound: int(0 + 1)}, nil, nil)
- ps = s.createPeerSelector(false)
+ ps = createPeerSelector(s.net, s.cfg, false)
require.Equal(t, 2, len(ps.peerClasses))
require.Equal(t, peerRankInitialFirstPriority, ps.peerClasses[0].initialRank)
diff --git a/cmd/algod/main.go b/cmd/algod/main.go
index b0a45bc6ae..b67747984c 100644
--- a/cmd/algod/main.go
+++ b/cmd/algod/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path/filepath"
@@ -119,7 +118,7 @@ func run() int {
}
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisPath)
+ genesisText, err := os.ReadFile(genesisPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisPath, err)
return 1
@@ -328,12 +327,17 @@ func run() int {
}
currentVersion := config.GetCurrentVersion()
+ var overrides []telemetryspec.NameValue
+ for name, val := range config.GetNonDefaultConfigValues(cfg, startupConfigCheckFields) {
+ overrides = append(overrides, telemetryspec.NameValue{Name: name, Value: val})
+ }
startupDetails := telemetryspec.StartupEventDetails{
Version: currentVersion.String(),
CommitHash: currentVersion.CommitHash,
Branch: currentVersion.Branch,
Channel: currentVersion.Channel,
InstanceHash: crypto.Hash([]byte(absolutePath)).String(),
+ Overrides: overrides,
}
log.EventWithDetails(telemetryspec.ApplicationState, telemetryspec.StartupEvent, startupDetails)
@@ -370,6 +374,30 @@ func run() int {
return 0
}
+var startupConfigCheckFields = []string{
+ "AgreementIncomingBundlesQueueLength",
+ "AgreementIncomingProposalsQueueLength",
+ "AgreementIncomingVotesQueueLength",
+ "BroadcastConnectionsLimit",
+ "CatchupBlockValidateMode",
+ "ConnectionsRateLimitingCount",
+ "ConnectionsRateLimitingWindowSeconds",
+ "GossipFanout",
+ "IncomingConnectionsLimit",
+ "IncomingMessageFilterBucketCount",
+ "IncomingMessageFilterBucketSize",
+ "LedgerSynchronousMode",
+ "MaxAcctLookback",
+ "MaxConnectionsPerIP",
+ "OutgoingMessageFilterBucketCount",
+ "OutgoingMessageFilterBucketSize",
+ "ProposalAssemblyTime",
+ "ReservedFDs",
+ "TxPoolExponentialIncreaseFactor",
+ "TxPoolSize",
+ "VerifiedTranscationsCacheSize",
+}
+
func resolveDataDir() string {
// Figure out what data directory to tell algod to use.
// If not specified on cmdline with '-d', look for default in environment.
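The telemetry change reports which of the listed config fields differ from their defaults. A hedged sketch of how such a report can be computed with reflection — this is not the real `config.GetNonDefaultConfigValues`, just an illustration of the comparison it implies:

```go
package main

import (
	"fmt"
	"reflect"
)

type Local struct {
	GossipFanout int
	TxPoolSize   int
}

// nonDefault returns the named fields whose values differ from def.
func nonDefault(cfg, def Local, fields []string) map[string]string {
	out := make(map[string]string)
	cv, dv := reflect.ValueOf(cfg), reflect.ValueOf(def)
	for _, name := range fields {
		c, d := cv.FieldByName(name), dv.FieldByName(name)
		if c.IsValid() && c.Interface() != d.Interface() {
			out[name] = fmt.Sprint(c.Interface())
		}
	}
	return out
}

func main() {
	def := Local{GossipFanout: 4, TxPoolSize: 15000}
	cfg := def
	cfg.TxPoolSize = 50000
	fmt.Println(nonDefault(cfg, def, []string{"GossipFanout", "TxPoolSize"}))
	// map[TxPoolSize:50000] — only the overridden field is reported
}
```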
diff --git a/cmd/algod/main_test.go b/cmd/algod/main_test.go
index 13fa72092b..c25505167c 100644
--- a/cmd/algod/main_test.go
+++ b/cmd/algod/main_test.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"testing"
@@ -30,7 +29,7 @@ import (
func BenchmarkAlgodStartup(b *testing.B) {
tmpDir := b.TempDir()
- genesisFile, err := ioutil.ReadFile("../../installer/genesis/devnet/genesis.json")
+ genesisFile, err := os.ReadFile("../../installer/genesis/devnet/genesis.json")
require.NoError(b, err)
dataDirectory = &tmpDir
@@ -38,7 +37,7 @@ func BenchmarkAlgodStartup(b *testing.B) {
initAndExit = &bInitAndExit
b.StartTimer()
for n := 0; n < b.N; n++ {
- err := ioutil.WriteFile(filepath.Join(tmpDir, config.GenesisJSONFile), genesisFile, 0766)
+ err := os.WriteFile(filepath.Join(tmpDir, config.GenesisJSONFile), genesisFile, 0766)
require.NoError(b, err)
fmt.Printf("file %s was written\n", filepath.Join(tmpDir, config.GenesisJSONFile))
run()
diff --git a/cmd/algofix/main.go b/cmd/algofix/main.go
index 66585e42c8..09df524928 100644
--- a/cmd/algofix/main.go
+++ b/cmd/algofix/main.go
@@ -13,7 +13,7 @@ import (
"go/parser"
"go/scanner"
"go/token"
- "io/ioutil"
+ "io"
"os"
"os/exec"
"path/filepath"
@@ -135,7 +135,7 @@ func processFile(filename string, useStdin bool) error {
defer f.Close()
}
- src, err := ioutil.ReadAll(f)
+ src, err := io.ReadAll(f)
if err != nil {
return err
}
@@ -209,7 +209,7 @@ func processFile(filename string, useStdin bool) error {
}
fixedSome = true
- return ioutil.WriteFile(f.Name(), newSrc, 0)
+ return os.WriteFile(f.Name(), newSrc, 0)
}
var gofmtBuf bytes.Buffer
@@ -248,7 +248,7 @@ func isGoFile(f os.FileInfo) bool {
}
func writeTempFile(dir, prefix string, data []byte) (string, error) {
- file, err := ioutil.TempFile(dir, prefix)
+ file, err := os.CreateTemp(dir, prefix)
if err != nil {
return "", err
}
diff --git a/cmd/algofix/typecheck.go b/cmd/algofix/typecheck.go
index 4550fe4f98..2b55355a26 100644
--- a/cmd/algofix/typecheck.go
+++ b/cmd/algofix/typecheck.go
@@ -9,7 +9,6 @@ import (
"go/ast"
"go/parser"
"go/token"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -161,12 +160,12 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
if err != nil {
return err
}
- dir, err := ioutil.TempDir(os.TempDir(), "fix_cgo_typecheck")
+ dir, err := os.MkdirTemp(os.TempDir(), "fix_cgo_typecheck")
if err != nil {
return err
}
defer os.RemoveAll(dir)
- err = ioutil.WriteFile(filepath.Join(dir, "in.go"), txt, 0600)
+ err = os.WriteFile(filepath.Join(dir, "in.go"), txt, 0600)
if err != nil {
return err
}
@@ -175,7 +174,7 @@ func typecheck(cfg *TypeConfig, f *ast.File) (typeof map[interface{}]string, ass
if err != nil {
return err
}
- out, err := ioutil.ReadFile(filepath.Join(dir, "_cgo_gotypes.go"))
+ out, err := os.ReadFile(filepath.Join(dir, "_cgo_gotypes.go"))
if err != nil {
return err
}
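The algofix edits above are the first of many mechanical `io/ioutil` migrations in this change set; Go 1.16 deprecated the package in favor of equivalents in `os` and `io`. The full mapping used throughout, in one runnable snippet (the only behavioral difference is `os.ReadDir`, which returns `[]os.DirEntry` rather than `[]fs.FileInfo`):

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	dir, _ := os.MkdirTemp("", "demo") // was ioutil.TempDir
	defer os.RemoveAll(dir)

	f, _ := os.CreateTemp(dir, "x") // was ioutil.TempFile
	f.Close()

	path := dir + "/x.txt"
	_ = os.WriteFile(path, []byte("hi"), 0600) // was ioutil.WriteFile
	b, _ := os.ReadFile(path)                  // was ioutil.ReadFile
	r, _ := io.ReadAll(strings.NewReader("!")) // was ioutil.ReadAll
	entries, _ := os.ReadDir(dir)              // was ioutil.ReadDir; []os.DirEntry now
	fmt.Println(string(b), string(r), len(entries))
}
```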
diff --git a/cmd/algoh/main.go b/cmd/algoh/main.go
index a458929aa2..4e75ae837b 100644
--- a/cmd/algoh/main.go
+++ b/cmd/algoh/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"os/signal"
@@ -351,8 +350,8 @@ func captureErrorLogs(algohConfig algoh.HostConfig, errorOutput stdCollector, ou
log.EventWithDetails(telemetryspec.HostApplicationState, telemetryspec.ErrorOutputEvent, details)
// Write stdout & stderr streams to disk
- _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm)
- _ = ioutil.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm)
+ _ = os.WriteFile(filepath.Join(absolutePath, nodecontrol.StdOutFilename), []byte(output.output), os.ModePerm)
+ _ = os.WriteFile(filepath.Join(absolutePath, nodecontrol.StdErrFilename), []byte(errorOutput.output), os.ModePerm)
}
if errorCondition && algohConfig.UploadOnError {
fmt.Fprintf(os.Stdout, "Uploading logs...\n")
diff --git a/cmd/algokey/common.go b/cmd/algokey/common.go
index 37f4e1ca47..9362fab74f 100644
--- a/cmd/algokey/common.go
+++ b/cmd/algokey/common.go
@@ -18,7 +18,7 @@ package main
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
"github.com/algorand/go-algorand/crypto"
@@ -63,7 +63,7 @@ func loadMnemonic(mnemonic string) crypto.Seed {
}
func loadKeyfile(keyfile string) crypto.Seed {
- seedbytes, err := ioutil.ReadFile(keyfile)
+ seedbytes, err := os.ReadFile(keyfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read key seed from %s: %v\n", keyfile, err)
os.Exit(1)
@@ -75,7 +75,7 @@ func loadKeyfile(keyfile string) crypto.Seed {
}
func writePrivateKey(keyfile string, seed crypto.Seed) {
- err := ioutil.WriteFile(keyfile, seed[:], 0600)
+ err := os.WriteFile(keyfile, seed[:], 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write key to %s: %v\n", keyfile, err)
os.Exit(1)
@@ -84,7 +84,7 @@ func writePrivateKey(keyfile string, seed crypto.Seed) {
func writePublicKey(pubkeyfile string, checksummed string) {
data := fmt.Sprintf("%s\n", checksummed)
- err := ioutil.WriteFile(pubkeyfile, []byte(data), 0666)
+ err := os.WriteFile(pubkeyfile, []byte(data), 0666)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write public key to %s: %v\n", pubkeyfile, err)
os.Exit(1)
@@ -100,7 +100,7 @@ func computeMnemonic(seed crypto.Seed) string {
return mnemonic
}
-// writeFile is a wrapper of ioutil.WriteFile which considers the special
+// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
var err error
@@ -111,14 +111,14 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
}
return nil
}
- return ioutil.WriteFile(filename, data, perm)
+ return os.WriteFile(filename, data, perm)
}
-// readFile is a wrapper of ioutil.ReadFile which considers the
+// readFile is a wrapper of os.ReadFile which considers the
// special case of stdin filename
func readFile(filename string) ([]byte, error) {
if filename == stdinFileNameValue {
- return ioutil.ReadAll(os.Stdin)
+ return io.ReadAll(os.Stdin)
}
- return ioutil.ReadFile(filename)
+ return os.ReadFile(filename)
}
diff --git a/cmd/algokey/keyreg.go b/cmd/algokey/keyreg.go
index 24ddafdd1e..156697e7fc 100644
--- a/cmd/algokey/keyreg.go
+++ b/cmd/algokey/keyreg.go
@@ -20,7 +20,6 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
"os"
"strings"
@@ -75,10 +74,14 @@ func init() {
keyregCmd.Flags().Uint64Var(&params.fee, "fee", minFee, "transaction fee")
keyregCmd.Flags().Uint64Var(&params.firstValid, "firstvalid", 0, "first round where the transaction may be committed to the ledger")
- keyregCmd.MarkFlagRequired("firstvalid") // nolint:errcheck
+ if err := keyregCmd.MarkFlagRequired("firstvalid"); err != nil {
+ panic(err)
+ }
keyregCmd.Flags().Uint64Var(&params.lastValid, "lastvalid", 0, fmt.Sprintf("last round where the generated transaction may be committed to the ledger, defaults to firstvalid + %d", txnLife))
keyregCmd.Flags().StringVar(&params.network, "network", "mainnet", "the network where the provided keys will be registered, one of mainnet/testnet/betanet")
- keyregCmd.MarkFlagRequired("network") // nolint:errcheck
+ if err := keyregCmd.MarkFlagRequired("network"); err != nil {
+ panic(err)
+ }
keyregCmd.Flags().BoolVar(&params.offline, "offline", false, "set to bring an account offline")
keyregCmd.Flags().StringVarP(&params.txFile, "outputFile", "o", "", fmt.Sprintf("write signed transaction to this file, or '%s' to write to stdout", stdoutFilenameValue))
keyregCmd.Flags().StringVar(&params.partkeyFile, "keyfile", "", "participation keys to register, file is opened to fetch metadata for the transaction; only specify when bringing an account online to vote in Algorand consensus")
@@ -244,7 +247,7 @@ func run(params keyregCmdParams) error {
return fmt.Errorf("failed to write transaction to stdout: %w", err)
}
} else {
- if err = ioutil.WriteFile(params.txFile, data, 0600); err != nil {
+ if err = os.WriteFile(params.txFile, data, 0600); err != nil {
return fmt.Errorf("failed to write transaction to '%s': %w", params.txFile, err)
}
}
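Promoting the previously `nolint`-ed `MarkFlagRequired` errors to panics is deliberate: the call only fails when the named flag was never registered, so panicking at init turns a misspelled flag name into an immediate startup failure instead of a silently optional flag. A sketch, assuming the standard cobra API:

```go
package main

import "github.com/spf13/cobra"

func main() {
	cmd := &cobra.Command{Use: "demo", Run: func(*cobra.Command, []string) {}}
	cmd.Flags().String("network", "", "target network")

	// Fails (and panics) only if the flag name below doesn't match a
	// registered flag — e.g. a typo like "netwrok".
	if err := cmd.MarkFlagRequired("network"); err != nil {
		panic(err)
	}
	_ = cmd.Execute()
}
```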
diff --git a/cmd/algokey/multisig.go b/cmd/algokey/multisig.go
index 7c8ae81040..b6d0bb108a 100644
--- a/cmd/algokey/multisig.go
+++ b/cmd/algokey/multisig.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -66,14 +65,14 @@ var multisigCmd = &cobra.Command{
seed := loadKeyfileOrMnemonic(multisigKeyfile, multisigMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
- txdata, err := ioutil.ReadFile(multisigTxfile)
+ txdata, err := os.ReadFile(multisigTxfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read transactions from %s: %v\n", multisigTxfile, err)
os.Exit(1)
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -101,7 +100,7 @@ var multisigCmd = &cobra.Command{
outBytes = append(outBytes, protocol.Encode(&stxn)...)
}
- err = ioutil.WriteFile(multisigOutfile, outBytes, 0600)
+ err = os.WriteFile(multisigOutfile, outBytes, 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write signed transactions to %s: %v\n", multisigOutfile, err)
os.Exit(1)
@@ -123,7 +122,7 @@ var appendAuthAddrCmd = &cobra.Command{
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
diff --git a/cmd/algokey/sign.go b/cmd/algokey/sign.go
index 9afa5a3d34..14f14e58b5 100644
--- a/cmd/algokey/sign.go
+++ b/cmd/algokey/sign.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/spf13/cobra"
@@ -52,14 +51,14 @@ var signCmd = &cobra.Command{
seed := loadKeyfileOrMnemonic(signKeyfile, signMnemonic)
key := crypto.GenerateSignatureSecrets(seed)
- txdata, err := ioutil.ReadFile(signTxfile)
+ txdata, err := os.ReadFile(signTxfile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read transactions from %s: %v\n", signTxfile, err)
os.Exit(1)
}
var outBytes []byte
- dec := protocol.NewDecoderBytes(txdata)
+ dec := protocol.NewMsgpDecoderBytes(txdata)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -78,7 +77,7 @@ var signCmd = &cobra.Command{
outBytes = append(outBytes, protocol.Encode(&stxn)...)
}
- err = ioutil.WriteFile(signOutfile, outBytes, 0600)
+ err = os.WriteFile(signOutfile, outBytes, 0600)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot write signed transactions to %s: %v\n", signOutfile, err)
os.Exit(1)
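`NewDecoderBytes` → `NewMsgpDecoderBytes` swaps the decoder implementation, but every call site keeps the same decode-until-EOF loop. The loop shape, runnable here with `encoding/json`'s streaming decoder as a stand-in (the msgp decoder is assumed to follow the same `Decode`-until-`io.EOF` contract):

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

type SignedTxn struct{ ID int }

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"ID":1}{"ID":2}`))
	var txns []SignedTxn
	for {
		var stxn SignedTxn
		err := dec.Decode(&stxn)
		if err == io.EOF {
			break // end of stream is the loop's normal exit
		}
		if err != nil {
			fmt.Println("decode failed:", err)
			return
		}
		txns = append(txns, stxn)
	}
	fmt.Println(txns) // [{1} {2}]
}
```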
diff --git a/cmd/algons/dnsCmd.go b/cmd/algons/dnsCmd.go
index c6b6a54f86..a1771414c1 100644
--- a/cmd/algons/dnsCmd.go
+++ b/cmd/algons/dnsCmd.go
@@ -20,7 +20,6 @@ import (
"bufio"
"context"
"fmt"
- "io/ioutil"
"net"
"os"
"regexp"
@@ -477,7 +476,7 @@ func doExportZone(network string, outputFilename string) bool {
return false
}
if outputFilename != "" {
- err = ioutil.WriteFile(outputFilename, exportedZone, 0666)
+ err = os.WriteFile(outputFilename, exportedZone, 0666)
if err != nil {
fmt.Fprintf(os.Stderr, "Unable to write exported zone file : %v\n", err)
return false
diff --git a/cmd/buildtools/genesis.go b/cmd/buildtools/genesis.go
index 98cb60ca67..e1aab257ae 100644
--- a/cmd/buildtools/genesis.go
+++ b/cmd/buildtools/genesis.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -102,7 +101,7 @@ var timestampCmd = &cobra.Command{
// Write out the genesis file in the same way we do to generate originally
// (see gen/generate.go)
jsonData := protocol.EncodeJSON(genesis)
- err = ioutil.WriteFile(timestampFile, append(jsonData, '\n'), 0666)
+ err = os.WriteFile(timestampFile, append(jsonData, '\n'), 0666)
if err != nil {
reportErrorf("Error saving genesis file '%s': %v\n", timestampFile, err)
}
@@ -117,7 +116,7 @@ var dumpGenesisIDCmd = &cobra.Command{
Short: "Dump the genesis ID for the specified genesis file",
Run: func(cmd *cobra.Command, args []string) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisFile, err)
os.Exit(1)
@@ -139,7 +138,7 @@ var dumpGenesisHashCmd = &cobra.Command{
Short: "Dump the genesis Hash for the specified genesis file",
Run: func(cmd *cobra.Command, args []string) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read genesis file %s: %v\n", genesisFile, err)
os.Exit(1)
@@ -206,7 +205,7 @@ var ensureCmd = &cobra.Command{
} else {
// Write source genesis (now updated with release timestamp, if applicable)
jsonData := protocol.EncodeJSON(sourceGenesis)
- err = ioutil.WriteFile(targetFile, jsonData, 0666)
+ err = os.WriteFile(targetFile, jsonData, 0666)
if err != nil {
reportErrorf("Error writing target genesis file '%s': %v\n", targetFile, err)
}
@@ -231,13 +230,13 @@ func ensureReleaseGenesis(src bookkeeping.Genesis, releaseFile string) (err erro
releaseGenesis = src
jsonData := protocol.EncodeJSON(releaseGenesis)
- err = ioutil.WriteFile(releaseFile, jsonData, 0666)
+ err = os.WriteFile(releaseFile, jsonData, 0666)
if err != nil {
return fmt.Errorf("error saving file: %v", err)
}
hash := releaseGenesis.Hash()
- err = ioutil.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
+ err = os.WriteFile(releaseFileHash, []byte(hash.String()), 0666)
if err != nil {
return fmt.Errorf("error saving hash file '%s': %v", releaseFileHash, err)
}
@@ -278,7 +277,7 @@ func verifyGenesisHashes(src, release bookkeeping.Genesis, hashFile string) (err
return fmt.Errorf("source and release hashes differ - genesis.json may have diverge from released version")
}
- relHashBytes, err := ioutil.ReadFile(hashFile)
+ relHashBytes, err := os.ReadFile(hashFile)
if err != nil {
return fmt.Errorf("error loading release hash file '%s'", hashFile)
}
diff --git a/cmd/catchpointdump/net.go b/cmd/catchpointdump/net.go
index 9e8dc45611..9073ece66e 100644
--- a/cmd/catchpointdump/net.go
+++ b/cmd/catchpointdump/net.go
@@ -303,7 +303,9 @@ func deleteLedgerFiles(deleteTracker bool) error {
func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitState) error {
// delete current ledger files.
- deleteLedgerFiles(true)
+ if err := deleteLedgerFiles(true); err != nil {
+ reportWarnf("Error deleting ledger files: %v", err)
+ }
cfg := config.GetDefaultLocal()
l, err := ledger.OpenLedger(logging.Base(), "./ledger", false, genesisInitState, cfg)
if err != nil {
@@ -311,7 +313,11 @@ func loadAndDump(addr string, tarFile string, genesisInitState ledgercore.InitSt
return err
}
- defer deleteLedgerFiles(!loadOnly)
+ defer func() {
+ if err := deleteLedgerFiles(!loadOnly); err != nil {
+ reportWarnf("Error deleting ledger files: %v", err)
+ }
+ }()
defer l.Close()
catchupAccessor := ledger.MakeCatchpointCatchupAccessor(l, logging.Base())
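The catchpointdump change fixes a subtle defer pitfall: `defer deleteLedgerFiles(!loadOnly)` evaluates its arguments immediately but discards the error return. Wrapping the call in a closure keeps the deferred timing while making the error observable. A minimal sketch (`reportWarnf` stands in for the real logger):

```go
package main

import "fmt"

func cleanup() error { return fmt.Errorf("disk busy") }

func main() {
	// Bad: `defer cleanup()` — the returned error vanishes.
	defer func() {
		if err := cleanup(); err != nil {
			fmt.Printf("Error deleting ledger files: %v\n", err) // reportWarnf in the real code
		}
	}()
	fmt.Println("working...")
}
```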
diff --git a/cmd/catchupsrv/download.go b/cmd/catchupsrv/download.go
index 813da9d03b..6a5880d42a 100644
--- a/cmd/catchupsrv/download.go
+++ b/cmd/catchupsrv/download.go
@@ -20,7 +20,7 @@ import (
"context"
"flag"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"os"
@@ -138,7 +138,7 @@ func fetchBlock(server string, blk uint64) error {
return fmt.Errorf("HTTP response: %s", resp.Status)
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -156,7 +156,7 @@ func fetchBlock(server string, blk uint64) error {
panic(err)
}
- return ioutil.WriteFile(fn, body, 0666)
+ return os.WriteFile(fn, body, 0666)
}
func fetcher(server string, wg *sync.WaitGroup) {
diff --git a/cmd/catchupsrv/main.go b/cmd/catchupsrv/main.go
index 86fd9645af..1f0de542aa 100644
--- a/cmd/catchupsrv/main.go
+++ b/cmd/catchupsrv/main.go
@@ -20,7 +20,6 @@ import (
"encoding/base64"
"flag"
"fmt"
- "io/ioutil"
"math/rand"
"net/http"
"os"
@@ -118,7 +117,7 @@ func main() {
var data []byte
if *dirFlag != "" {
blkPath := blockToPath(roundNumber)
- data, err = ioutil.ReadFile(
+ data, err = os.ReadFile(
path.Join(
*dirFlag,
"v"+versionStr,
diff --git a/cmd/dbgen/main.go b/cmd/dbgen/main.go
index 078f30a3b6..f73809ed76 100644
--- a/cmd/dbgen/main.go
+++ b/cmd/dbgen/main.go
@@ -21,7 +21,7 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
+ "os"
"strings"
"time"
)
@@ -59,13 +59,13 @@ func main() {
if *inputfilename == "" {
panic("error: No database schema file specified")
}
- input, err := ioutil.ReadFile(*inputfilename)
+ input, err := os.ReadFile(*inputfilename)
if err != nil {
panic(err)
}
header := ""
if *headerfilename != "" {
- headerBytes, err := ioutil.ReadFile(*headerfilename)
+ headerBytes, err := os.ReadFile(*headerfilename)
if err != nil {
panic(err)
}
@@ -78,7 +78,7 @@ func main() {
if *outputfilename == "" {
fmt.Println(payload)
} else {
- err := ioutil.WriteFile(*outputfilename, []byte(payload), 0666)
+ err := os.WriteFile(*outputfilename, []byte(payload), 0666)
if err != nil {
panic(err)
}
diff --git a/cmd/dispenser/server.go b/cmd/dispenser/server.go
index 39ecb41841..d4ec0b5b87 100644
--- a/cmd/dispenser/server.go
+++ b/cmd/dispenser/server.go
@@ -21,7 +21,7 @@ import (
"encoding/json"
"flag"
"fmt"
- "io/ioutil"
+ "io"
"log"
"net/http"
"net/url"
@@ -150,7 +150,7 @@ func (cfg dispenserSiteConfig) checkRecaptcha(remoteip, response string) (r reca
}
defer resp.Body.Close()
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return
}
@@ -219,7 +219,7 @@ func main() {
os.Exit(1)
}
- configText, err := ioutil.ReadFile(*configFile)
+ configText, err := os.ReadFile(*configFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot read config file (%s): %v\n", *configFile, err)
os.Exit(1)
@@ -237,7 +237,7 @@ func main() {
var hosts []string
for h, cfg := range configMap {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir("", "dispenser")
+ cacheDir, err := os.MkdirTemp("", "dispenser")
if err != nil {
fmt.Fprintf(os.Stderr, "Cannot make temp dir: %v\n", err)
os.Exit(1)
diff --git a/cmd/goal/account.go b/cmd/goal/account.go
index 6cd3b9a87e..62f1b08c80 100644
--- a/cmd/goal/account.go
+++ b/cmd/goal/account.go
@@ -20,7 +20,6 @@ import (
"bufio"
"encoding/base64"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -540,9 +539,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var createdAssets []generatedV2.Asset
if account.CreatedAssets != nil {
createdAssets = make([]generatedV2.Asset, len(*account.CreatedAssets))
- for i, asset := range *account.CreatedAssets {
- createdAssets[i] = asset
- }
+ copy(createdAssets, *account.CreatedAssets)
sort.Slice(createdAssets, func(i, j int) bool {
return createdAssets[i].Index < createdAssets[j].Index
})
@@ -551,9 +548,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var heldAssets []generatedV2.AssetHolding
if account.Assets != nil {
heldAssets = make([]generatedV2.AssetHolding, len(*account.Assets))
- for i, assetHolding := range *account.Assets {
- heldAssets[i] = assetHolding
- }
+ copy(heldAssets, *account.Assets)
sort.Slice(heldAssets, func(i, j int) bool {
return heldAssets[i].AssetId < heldAssets[j].AssetId
})
@@ -562,9 +557,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var createdApps []generatedV2.Application
if account.CreatedApps != nil {
createdApps = make([]generatedV2.Application, len(*account.CreatedApps))
- for i, app := range *account.CreatedApps {
- createdApps[i] = app
- }
+ copy(createdApps, *account.CreatedApps)
sort.Slice(createdApps, func(i, j int) bool {
return createdApps[i].Id < createdApps[j].Id
})
@@ -573,9 +566,7 @@ func printAccountInfo(client libgoal.Client, address string, onlyShowAssetIds bo
var optedInApps []generatedV2.ApplicationLocalState
if account.AppsLocalState != nil {
optedInApps = make([]generatedV2.ApplicationLocalState, len(*account.AppsLocalState))
- for i, appLocalState := range *account.AppsLocalState {
- optedInApps[i] = appLocalState
- }
+ copy(optedInApps, *account.AppsLocalState)
sort.Slice(optedInApps, func(i, j int) bool {
return optedInApps[i].Id < optedInApps[j].Id
})
@@ -1299,7 +1290,7 @@ var importRootKeysCmd = &cobra.Command{
}
keyDir := filepath.Join(dataDir, genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
@@ -1483,7 +1474,7 @@ func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algod
// Get a list of files in the participation keys directory
keyDir := filepath.Join(c.DataDir(), genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
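The `printAccountInfo` loops replaced above are the textbook case for the built-in `copy`: when source and destination are slices of the same element type, one call moves all elements, which is exactly what linters such as gosimple's S1001 suggest. For example:

```go
package main

import "fmt"

func main() {
	src := []int{3, 1, 2}
	dst := make([]int, len(src))
	copy(dst, src)   // same effect as: for i, v := range src { dst[i] = v }
	fmt.Println(dst) // [3 1 2]
}
```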
diff --git a/cmd/goal/accountsList.go b/cmd/goal/accountsList.go
index 56de35deb8..dc646ffb00 100644
--- a/cmd/goal/accountsList.go
+++ b/cmd/goal/accountsList.go
@@ -19,7 +19,6 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"os/user"
"path/filepath"
@@ -184,7 +183,7 @@ func (accountList *AccountsList) getNameByAddress(address string) string {
func (accountList *AccountsList) dumpList() {
accountsListJSON, _ := json.MarshalIndent(accountList, "", " ")
accountsListJSON = append(accountsListJSON, '\n')
- err := ioutil.WriteFile(accountList.accountListFileName(), accountsListJSON, 0644)
+ err := os.WriteFile(accountList.accountListFileName(), accountsListJSON, 0644)
if err != nil {
log.Error(err.Error())
@@ -197,7 +196,7 @@ func (accountList *AccountsList) loadList() {
// First, check if the file exists.
filename := accountList.accountListFileName()
if _, err := os.Stat(filename); err == nil {
- raw, err := ioutil.ReadFile(filename)
+ raw, err := os.ReadFile(filename)
if err != nil {
log.Error(err.Error())
}
diff --git a/cmd/goal/application.go b/cmd/goal/application.go
index 884c3d6e68..eaa7de91eb 100644
--- a/cmd/goal/application.go
+++ b/cmd/goal/application.go
@@ -23,6 +23,7 @@ import (
"encoding/base64"
"encoding/binary"
"encoding/hex"
+ "encoding/json"
"errors"
"fmt"
"net/http"
@@ -32,9 +33,9 @@ import (
"github.com/spf13/cobra"
+ "github.com/algorand/avm-abi/abi"
"github.com/algorand/go-algorand/crypto"
apiclient "github.com/algorand/go-algorand/daemon/algod/api/client"
- "github.com/algorand/go-algorand/data/abi"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
@@ -188,9 +189,15 @@ func init() {
infoAppCmd.MarkFlagRequired("app-id")
- methodAppCmd.MarkFlagRequired("method") // nolint:errcheck // follow previous required flag format
- methodAppCmd.MarkFlagRequired("from") // nolint:errcheck
- methodAppCmd.Flags().MarkHidden("app-arg") // nolint:errcheck
+ panicIfErr(methodAppCmd.MarkFlagRequired("method"))
+ panicIfErr(methodAppCmd.MarkFlagRequired("from"))
+ panicIfErr(appCmd.PersistentFlags().MarkHidden("app-arg"))
+}
+
+func panicIfErr(err error) {
+ if err != nil {
+ panic(err)
+ }
}
type appCallArg struct {
@@ -1169,6 +1176,76 @@ func populateMethodCallReferenceArgs(sender string, currentApp uint64, types []s
return resolvedIndexes, nil
}
+// maxAppArgs is the maximum number of arguments for an application call transaction, in compliance
+// with ARC-4. Currently this is the same as the MaxAppArgs consensus parameter, but the
+// difference is that the consensus parameter is liable to change in a future consensus upgrade.
+// However, the ARC-4 ABI argument encoding **MUST** always remain the same.
+const maxAppArgs = 16
+
+// The tuple threshold is maxAppArgs, minus 1 for the method selector in the first app arg,
+// minus 1 for the final app argument becoming a tuple of the remaining method args
+const methodArgsTupleThreshold = maxAppArgs - 2
+
+// parseMethodArgJSONtoByteSlice converts input method arguments to ABI encoded bytes.
+// It converts funcArgTypes into a tuple type and applies it to the input argument strings (in JSON format).
+// If there are more than 15 inputs, the trailing inputs are compacted into one tuple.
+func parseMethodArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationArgs *[][]byte) error {
+ abiTypes := make([]abi.Type, len(argTypes))
+ for i, typeString := range argTypes {
+ abiType, err := abi.TypeOf(typeString)
+ if err != nil {
+ return err
+ }
+ abiTypes[i] = abiType
+ }
+
+ if len(abiTypes) != len(jsonArgs) {
+ return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTypes))
+ }
+
+ // Up to 16 app arguments can be passed to an app call. The first is reserved for the method
+ // selector, and the rest are for method call arguments. But if more than 15 method call
+ // arguments are present, the method arguments after the 14th are placed in a tuple in the
+ // last app argument slot.
+ if len(abiTypes) > maxAppArgs-1 {
+ typesForTuple := make([]abi.Type, len(abiTypes)-methodArgsTupleThreshold)
+ copy(typesForTuple, abiTypes[methodArgsTupleThreshold:])
+
+ compactedType, err := abi.MakeTupleType(typesForTuple)
+ if err != nil {
+ return err
+ }
+
+ abiTypes = append(abiTypes[:methodArgsTupleThreshold], compactedType)
+
+ tupleValues := make([]json.RawMessage, len(jsonArgs)-methodArgsTupleThreshold)
+ for i, jsonArg := range jsonArgs[methodArgsTupleThreshold:] {
+ tupleValues[i] = []byte(jsonArg)
+ }
+
+ remainingJSON, err := json.Marshal(tupleValues)
+ if err != nil {
+ return err
+ }
+
+ jsonArgs = append(jsonArgs[:methodArgsTupleThreshold], string(remainingJSON))
+ }
+
+ // parse JSON value to ABI encoded bytes
+ for i := 0; i < len(jsonArgs); i++ {
+ interfaceVal, err := abiTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
+ if err != nil {
+ return err
+ }
+ abiEncoded, err := abiTypes[i].Encode(interfaceVal)
+ if err != nil {
+ return err
+ }
+ *applicationArgs = append(*applicationArgs, abiEncoded)
+ }
+ return nil
+}
+
var methodAppCmd = &cobra.Command{
Use: "method",
Short: "Invoke an ABI method",
@@ -1284,7 +1361,7 @@ var methodAppCmd = &cobra.Command{
basicArgValues[basicArgIndex] = strconv.Itoa(resolved)
}
- err = abi.ParseArgJSONtoByteSlice(basicArgTypes, basicArgValues, &applicationArgs)
+ err = parseMethodArgJSONtoByteSlice(basicArgTypes, basicArgValues, &applicationArgs)
if err != nil {
reportErrorf("cannot parse arguments to ABI encoding: %v", err)
}
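To make the packing rule concrete: for a method with 16 arguments, app arg 0 carries the 4-byte selector, method args 0–13 occupy app args 1–14, and method args 14–15 are ABI-encoded as one tuple in the final slot — precisely the shape asserted by the 16-string case in the new test below. The index arithmetic, as a tiny runnable check:

```go
package main

import "fmt"

func main() {
	const maxAppArgs = 16
	const threshold = maxAppArgs - 2 // 14: one slot for the selector, one for the tuple
	methodArgs := 16
	if methodArgs > maxAppArgs-1 {
		fmt.Printf("method args 0..%d stay individual; args %d..%d become one tuple\n",
			threshold-1, threshold, methodArgs-1)
		// method args 0..13 stay individual; args 14..15 become one tuple
	}
}
```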
diff --git a/cmd/goal/application_test.go b/cmd/goal/application_test.go
new file mode 100644
index 0000000000..7de23a5be0
--- /dev/null
+++ b/cmd/goal/application_test.go
@@ -0,0 +1,143 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+
+ "github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/stretchr/testify/require"
+)
+
+func TestParseMethodArgJSONtoByteSlice(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ makeRepeatSlice := func(size int, value string) []string {
+ slice := make([]string, size)
+ for i := range slice {
+ slice[i] = value
+ }
+ return slice
+ }
+
+ tests := []struct {
+ argTypes []string
+ jsonArgs []string
+ expectedAppArgs [][]byte
+ }{
+ {
+ argTypes: []string{},
+ jsonArgs: []string{},
+ expectedAppArgs: [][]byte{},
+ },
+ {
+ argTypes: []string{"uint8"},
+ jsonArgs: []string{"100"},
+ expectedAppArgs: [][]byte{{100}},
+ },
+ {
+ argTypes: []string{"uint8", "uint16"},
+ jsonArgs: []string{"100", "65535"},
+ expectedAppArgs: [][]byte{{100}, {255, 255}},
+ },
+ {
+ argTypes: makeRepeatSlice(15, "string"),
+ jsonArgs: []string{
+ `"a"`,
+ `"b"`,
+ `"c"`,
+ `"d"`,
+ `"e"`,
+ `"f"`,
+ `"g"`,
+ `"h"`,
+ `"i"`,
+ `"j"`,
+ `"k"`,
+ `"l"`,
+ `"m"`,
+ `"n"`,
+ `"o"`,
+ },
+ expectedAppArgs: [][]byte{
+ {00, 01, 97},
+ {00, 01, 98},
+ {00, 01, 99},
+ {00, 01, 100},
+ {00, 01, 101},
+ {00, 01, 102},
+ {00, 01, 103},
+ {00, 01, 104},
+ {00, 01, 105},
+ {00, 01, 106},
+ {00, 01, 107},
+ {00, 01, 108},
+ {00, 01, 109},
+ {00, 01, 110},
+ {00, 01, 111},
+ },
+ },
+ {
+ argTypes: makeRepeatSlice(16, "string"),
+ jsonArgs: []string{
+ `"a"`,
+ `"b"`,
+ `"c"`,
+ `"d"`,
+ `"e"`,
+ `"f"`,
+ `"g"`,
+ `"h"`,
+ `"i"`,
+ `"j"`,
+ `"k"`,
+ `"l"`,
+ `"m"`,
+ `"n"`,
+ `"o"`,
+ `"p"`,
+ },
+ expectedAppArgs: [][]byte{
+ {00, 01, 97},
+ {00, 01, 98},
+ {00, 01, 99},
+ {00, 01, 100},
+ {00, 01, 101},
+ {00, 01, 102},
+ {00, 01, 103},
+ {00, 01, 104},
+ {00, 01, 105},
+ {00, 01, 106},
+ {00, 01, 107},
+ {00, 01, 108},
+ {00, 01, 109},
+ {00, 01, 110},
+ {00, 04, 00, 07, 00, 01, 111, 00, 01, 112},
+ },
+ },
+ }
+
+ for i, test := range tests {
+ t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
+ applicationArgs := [][]byte{}
+ err := parseMethodArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
+ require.NoError(t, err)
+ require.Equal(t, test.expectedAppArgs, applicationArgs)
+ })
+ }
+}
diff --git a/cmd/goal/clerk.go b/cmd/goal/clerk.go
index 2be5ff3322..8ab3293267 100644
--- a/cmd/goal/clerk.go
+++ b/cmd/goal/clerk.go
@@ -544,7 +544,7 @@ var rawsendCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
client := ensureAlgodClient(ensureSingleDataDir())
txnIDs := make(map[transactions.Txid]transactions.SignedTxn)
@@ -673,7 +673,7 @@ var inspectCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
count := 0
for {
var txn transactions.SignedTxn
@@ -773,7 +773,7 @@ var signCmd = &cobra.Command{
}
var outData []byte
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
// read the entire file and prepare in-memory copy of each signed transaction, with grouping.
txnGroups := make(map[crypto.Digest][]*transactions.SignedTxn)
var groupsOrder []crypto.Digest
@@ -868,7 +868,7 @@ var groupCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var stxns []transactions.SignedTxn
var group transactions.TxGroup
@@ -920,7 +920,7 @@ var splitCmd = &cobra.Command{
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var txns []transactions.SignedTxn
for {
@@ -1120,7 +1120,7 @@ var dryrunCmd = &cobra.Command{
if err != nil {
reportErrorf(fileReadError, txFilename, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
stxns := make([]transactions.SignedTxn, 0, 10)
for {
var txn transactions.SignedTxn
diff --git a/cmd/goal/commands.go b/cmd/goal/commands.go
index 4f93b6fb4b..c6103d259e 100644
--- a/cmd/goal/commands.go
+++ b/cmd/goal/commands.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"os/user"
@@ -237,7 +236,7 @@ var protoCmd = &cobra.Command{
func readGenesis(dataDir string) (genesis bookkeeping.Genesis, err error) {
path := filepath.Join(dataDir, config.GenesisJSONFile)
- genesisText, err := ioutil.ReadFile(path)
+ genesisText, err := os.ReadFile(path)
if err != nil {
return
}
@@ -564,7 +563,7 @@ func reportErrorf(format string, args ...interface{}) {
reportErrorln(fmt.Sprintf(format, args...))
}
-// writeFile is a wrapper of ioutil.WriteFile which considers the special
+// writeFile is a wrapper of os.WriteFile which considers the special
// case of stdout filename
func writeFile(filename string, data []byte, perm os.FileMode) error {
var err error
@@ -575,7 +574,7 @@ func writeFile(filename string, data []byte, perm os.FileMode) error {
}
return nil
}
- return ioutil.WriteFile(filename, data, perm)
+ return os.WriteFile(filename, data, perm)
}
// writeDryrunReqToFile creates dryrun request object and writes to a file
@@ -593,13 +592,13 @@ func writeDryrunReqToFile(client libgoal.Client, txnOrStxn interface{}, outFilen
return
}
-// readFile is a wrapper of ioutil.ReadFile which considers the
+// readFile is a wrapper of os.ReadFile which considers the
// special case of stdin filename
func readFile(filename string) ([]byte, error) {
if filename == stdinFileNameValue {
- return ioutil.ReadAll(os.Stdin)
+ return io.ReadAll(os.Stdin)
}
- return ioutil.ReadFile(filename)
+ return os.ReadFile(filename)
}
func checkTxValidityPeriodCmdFlags(cmd *cobra.Command) {
diff --git a/cmd/goal/multisig.go b/cmd/goal/multisig.go
index 6e55fcc9e1..643b335d0e 100644
--- a/cmd/goal/multisig.go
+++ b/cmd/goal/multisig.go
@@ -19,7 +19,6 @@ package main
import (
"fmt"
"io"
- "io/ioutil"
"os"
"github.com/spf13/cobra"
@@ -96,7 +95,7 @@ var addSigCmd = &cobra.Command{
wh, pw := ensureWalletHandleMaybePassword(dataDir, walletName, true)
var outData []byte
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
for {
var stxn transactions.SignedTxn
err = dec.Decode(&stxn)
@@ -240,12 +239,12 @@ var mergeSigCmd = &cobra.Command{
var txnLists [][]transactions.SignedTxn
for _, arg := range args {
- data, err := ioutil.ReadFile(arg)
+ data, err := os.ReadFile(arg)
if err != nil {
reportErrorf(fileReadError, arg, err)
}
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
var txns []transactions.SignedTxn
for {
var txn transactions.SignedTxn
diff --git a/cmd/goal/node.go b/cmd/goal/node.go
index 1624603e34..68654055aa 100644
--- a/cmd/goal/node.go
+++ b/cmd/goal/node.go
@@ -23,7 +23,7 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"os"
@@ -131,7 +131,7 @@ func getMissingCatchpointLabel(URL string) (label string, err error) {
err = errors.New(resp.Status)
return
}
- body, err := ioutil.ReadAll(resp.Body)
+ body, err := io.ReadAll(resp.Body)
if err != nil {
return
}
@@ -648,7 +648,7 @@ var createCmd = &cobra.Command{
}
// copy genesis block to destination
- err = ioutil.WriteFile(destPath, genesisContent, 0644)
+ err = os.WriteFile(destPath, genesisContent, 0644)
if err != nil {
reportErrorf(errorNodeCreation, err)
}
diff --git a/cmd/goal/tealsign.go b/cmd/goal/tealsign.go
index 3d35624a1d..5ff7617d06 100644
--- a/cmd/goal/tealsign.go
+++ b/cmd/goal/tealsign.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/base32"
"encoding/base64"
- "io/ioutil"
+ "os"
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/data/basics"
@@ -83,7 +83,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
var kdata []byte
var err error
if keyFilename != "" {
- kdata, err = ioutil.ReadFile(keyFilename)
+ kdata, err = os.ReadFile(keyFilename)
if err != nil {
reportErrorf(tealsignKeyfileFail, err)
}
@@ -123,7 +123,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
if lsigTxnFilename != "" {
// If passed a SignedTxn with a logic sig, compute
// the hash of the program within the logic sig
- stxnBytes, err := ioutil.ReadFile(lsigTxnFilename)
+ stxnBytes, err := os.ReadFile(lsigTxnFilename)
if err != nil {
reportErrorf(fileReadError, lsigTxnFilename, err)
}
@@ -159,7 +159,7 @@ The base64 encoding of the signature will always be printed to stdout. Optionall
var dataToSign []byte
if dataFile != "" {
- dataToSign, err = ioutil.ReadFile(dataFile)
+ dataToSign, err = os.ReadFile(dataFile)
if err != nil {
reportErrorf(tealsignParseData, err)
}
diff --git a/cmd/loadgenerator/main.go b/cmd/loadgenerator/main.go
index 25026f7f90..37e1134719 100644
--- a/cmd/loadgenerator/main.go
+++ b/cmd/loadgenerator/main.go
@@ -20,7 +20,6 @@ import (
"flag"
"fmt"
"io/fs"
- "io/ioutil"
"net/url"
"os"
"path/filepath"
@@ -66,9 +65,9 @@ func loadMnemonic(mnemonic string) crypto.Seed {
// Like shared/pingpong/accounts.go
func findRootKeys(algodDir string) []*crypto.SignatureSecrets {
keylist := make([]*crypto.SignatureSecrets, 0, 5)
- err := filepath.Walk(algodDir, func(path string, info fs.FileInfo, err error) error {
+ err := filepath.Walk(algodDir, func(path string, info fs.FileInfo, _ error) error {
var handle db.Accessor
- handle, err = db.MakeErasableAccessor(path)
+ handle, err := db.MakeErasableAccessor(path)
if err != nil {
return nil // don't care, move on
}
@@ -107,10 +106,10 @@ func main() {
if (cfg.ClientURL == nil || cfg.ClientURL.String() == "") || cfg.APIToken == "" {
if algodDir != "" {
path := filepath.Join(algodDir, "algod.net")
- net, err := ioutil.ReadFile(path)
+ net, err := os.ReadFile(path)
maybefail(err, "%s: %v\n", path, err)
path = filepath.Join(algodDir, "algod.token")
- token, err := ioutil.ReadFile(path)
+ token, err := os.ReadFile(path)
maybefail(err, "%s: %v\n", path, err)
cfg.ClientURL, err = url.Parse(fmt.Sprintf("http://%s", string(strings.TrimSpace(string(net)))))
maybefail(err, "bad net url %v\n", err)
@@ -126,8 +125,9 @@ func main() {
var publicKeys []basics.Address
addKey := func(mnemonic string) {
seed := loadMnemonic(mnemonic)
- privateKeys = append(privateKeys, crypto.GenerateSignatureSecrets(seed))
- publicKeys = append(publicKeys, basics.Address(privateKeys[0].SignatureVerifier))
+ secrets := crypto.GenerateSignatureSecrets(seed)
+ privateKeys = append(privateKeys, secrets)
+ publicKeys = append(publicKeys, basics.Address(secrets.SignatureVerifier))
}
if cfg.AccountMnemonic != "" { // one mnemonic provided
addKey(cfg.AccountMnemonic)
@@ -241,7 +241,7 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKeys
sendSize = transactionBlockSize
}
// create sendSize transaction to send.
- txns := make([]transactions.SignedTxn, sendSize, sendSize)
+ txns := make([]transactions.SignedTxn, sendSize)
for i := range txns {
tx := transactions.Transaction{
Header: transactions.Header{
@@ -289,7 +289,7 @@ func generateTransactions(restClient client.RestClient, cfg config, privateKeys
for i := 0; i < nroutines; i++ {
totalSent += sent[i]
}
- dt := time.Now().Sub(start)
+ dt := time.Since(start)
fmt.Fprintf(os.Stdout, "sent %d/%d in %s (%.1f/s)\n", totalSent, sendSize, dt.String(), float64(totalSent)/dt.Seconds())
if cfg.TxnsToSend != 0 {
// We attempted what we were asked. We're done.
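Among the loadgenerator cleanups, `time.Since(start)` is the idiomatic spelling of `time.Now().Sub(start)` (and `make([]T, n)` of `make([]T, n, n)`); both pairs are behaviorally identical:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	start := time.Now()
	time.Sleep(10 * time.Millisecond)
	fmt.Println(time.Since(start).Round(time.Millisecond)) // ≈10ms, same as time.Now().Sub(start)
}
```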
diff --git a/cmd/nodecfg/apply.go b/cmd/nodecfg/apply.go
index 70008ff9bc..77e302c7b6 100644
--- a/cmd/nodecfg/apply.go
+++ b/cmd/nodecfg/apply.go
@@ -18,7 +18,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
@@ -101,7 +100,7 @@ func doApply(rootDir string, rootNodeDir, channel string, hostName string, dnsNa
// If config doesn't already exist, download it to specified root dir
if missing {
fmt.Fprintf(os.Stdout, "Configuration rootdir not specified - downloading latest version...\n")
- rootDir, err = ioutil.TempDir("", channel)
+ rootDir, err = os.MkdirTemp("", channel)
if err != nil {
return fmt.Errorf("error creating temp dir for extracting config package: %v", err)
}
diff --git a/cmd/opdoc/opdoc.go b/cmd/opdoc/opdoc.go
index 94a394bbca..226d87a78b 100644
--- a/cmd/opdoc/opdoc.go
+++ b/cmd/opdoc/opdoc.go
@@ -28,7 +28,7 @@ import (
"github.com/algorand/go-algorand/protocol"
)
-var docVersion = 7
+var docVersion = 8
func opGroupMarkdownTable(names []string, out io.Writer) {
fmt.Fprint(out, `| Opcode | Description |
diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go
index 779ed3f29e..9df0052ac1 100644
--- a/cmd/pingpong/runCmd.go
+++ b/cmd/pingpong/runCmd.go
@@ -19,12 +19,14 @@ package main
import (
"context"
"encoding/base64"
+ "encoding/json"
"fmt"
- "io/ioutil"
+ "math/rand"
"os"
"path/filepath"
"runtime/pprof"
"strconv"
+ "strings"
"time"
"github.com/spf13/cobra"
@@ -68,16 +70,21 @@ var rekey bool
var nftAsaPerSecond uint32
var pidFile string
var cpuprofile string
+var randSeed int64
+var deterministicKeys bool
+var generatedAccountsCount uint32
+var generatedAccountSampleMethod string
+var configPath string
func init() {
rootCmd.AddCommand(runCmd)
runCmd.PersistentFlags().StringVarP(&dataDir, "datadir", "d", "", "Data directory for the node")
- runCmd.Flags().StringVarP(&srcAddress, "src", "s", "", "Account address to use as funding source for new accounts)")
+ runCmd.Flags().StringVarP(&srcAddress, "src", "s", "", "Account address to use as funding source for new accounts")
runCmd.Flags().Uint32VarP(&numAccounts, "numaccounts", "n", 0, "The number of accounts to include in the transfers")
runCmd.Flags().Uint64VarP(&maxAmount, "ma", "a", 0, "The (max) amount to be transferred")
runCmd.Flags().Uint64VarP(&minAccountFunds, "minaccount", "", 0, "The minimum amount to fund a test account with")
- runCmd.Flags().Uint64VarP(&txnPerSec, "tps", "t", 200, "Number of Txn per second that pingpong sends")
+ runCmd.Flags().Uint64VarP(&txnPerSec, "tps", "t", 0, "Number of Txn per second that pingpong sends")
runCmd.Flags().Int64VarP(&maxFee, "mf", "f", -1, "The MAX fee to be used for transactions, a value of '0' tells the server to use a suggested fee.")
runCmd.Flags().Uint64VarP(&minFee, "minf", "m", 1000, "The MIN fee to be used for randomFee transactions")
runCmd.Flags().BoolVar(&randomAmount, "ra", false, "Set to enable random amounts (up to maxamount)")
@@ -88,6 +95,7 @@ func init() {
runCmd.Flags().StringVar(&runTime, "run", "", "Duration of time (seconds) to run transfers before resting (0 means non-stop)")
runCmd.Flags().StringVar(&refreshTime, "refresh", "", "Duration of time (seconds) between refilling accounts with money (0 means no refresh)")
runCmd.Flags().StringVar(&logicProg, "program", "", "File containing the compiled program to include as a logic sig")
+ runCmd.Flags().StringVar(&configPath, "config", "", "path to read config json from, or json literal")
runCmd.Flags().BoolVar(&saveConfig, "save", false, "Save the effective configuration to disk")
runCmd.Flags().BoolVar(&useDefault, "reset", false, "Reset to the default configuration (not read from disk)")
runCmd.Flags().BoolVar(&quietish, "quiet", false, "quietish stdout logging")
@@ -108,6 +116,10 @@ func init() {
runCmd.Flags().Uint32Var(&nftAsaPerSecond, "nftasapersecond", 0, "The number of NFT-style ASAs to create per second")
runCmd.Flags().StringVar(&pidFile, "pidfile", "", "path to write process id of this pingpong")
runCmd.Flags().StringVar(&cpuprofile, "cpuprofile", "", "write cpu profile to `file`")
+ runCmd.Flags().Int64Var(&randSeed, "seed", 0, "input to math/rand.Seed(), defaults to time.Now().UnixNano()")
+ runCmd.Flags().BoolVar(&deterministicKeys, "deterministicKeys", false, "Draw from set of netgoal-created accounts using deterministic keys")
+ runCmd.Flags().Uint32Var(&generatedAccountsCount, "genaccounts", 0, "The total number of accounts pre-generated by netgoal")
+ runCmd.Flags().StringVar(&generatedAccountSampleMethod, "gensamplemethod", "random", "The method of sampling from the total # of pre-generated accounts")
}
var runCmd = &cobra.Command{
@@ -116,7 +128,7 @@ var runCmd = &cobra.Command{
Long: ``,
Run: func(cmd *cobra.Command, args []string) {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir("", "pingpong")
+ cacheDir, err := os.MkdirTemp("", "pingpong")
if err != nil {
reportErrorf("Cannot make temp dir: %v\n", err)
}
@@ -156,17 +168,45 @@ var runCmd = &cobra.Command{
}
// Prepare configuration
+ dataDirCfgPath := filepath.Join(ac.DataDir(), pingpong.ConfigFilename)
var cfg pingpong.PpConfig
- cfgPath := filepath.Join(ac.DataDir(), pingpong.ConfigFilename)
- if useDefault {
- cfg = pingpong.DefaultConfig
+ if configPath != "" {
+ if configPath[0] == '{' {
+ // json literal as arg
+ cfg = pingpong.DefaultConfig
+ lf := strings.NewReader(configPath)
+ dec := json.NewDecoder(lf)
+ err = dec.Decode(&cfg)
+ if err != nil {
+ reportErrorf("-config: bad config json, %v", err)
+ }
+ fmt.Fprintf(os.Stdout, "config from --config:\n")
+ cfg.Dump(os.Stdout)
+ } else {
+ cfg, err = pingpong.LoadConfigFromFile(configPath)
+ if err != nil {
+ reportErrorf("%s: bad config json, %v", configPath, err)
+ }
+ fmt.Fprintf(os.Stdout, "config from %#v:\n", configPath)
+ cfg.Dump(os.Stdout)
+ }
} else {
- cfg, err = pingpong.LoadConfigFromFile(cfgPath)
- if err != nil && !os.IsNotExist(err) {
- reportErrorf("Error loading configuration from '%s': %v\n", cfgPath, err)
+ if useDefault {
+ cfg = pingpong.DefaultConfig
+ } else {
+ cfg, err = pingpong.LoadConfigFromFile(dataDirCfgPath)
+ if err != nil && !os.IsNotExist(err) {
+ reportErrorf("Error loading configuration from '%s': %v\n", dataDirCfgPath, err)
+ }
}
}
+ if randSeed == 0 {
+ rand.Seed(time.Now().UnixNano())
+ } else {
+ rand.Seed(randSeed)
+ }
+
if srcAddress != "" {
cfg.SrcAccount = srcAddress
}
@@ -186,10 +226,12 @@ var runCmd = &cobra.Command{
cfg.MinAccountFunds = minAccountFunds
}
- if txnPerSec == 0 {
+ if txnPerSec != 0 {
+ cfg.TxnPerSec = txnPerSec
+ }
+ if cfg.TxnPerSec == 0 {
reportErrorf("cannot set tps to 0")
}
- cfg.TxnPerSec = txnPerSec
if randomFee {
if cfg.MinFee > cfg.MaxFee {
@@ -206,15 +248,15 @@ var runCmd = &cobra.Command{
if randomAmount {
cfg.RandomizeAmt = true
}
- cfg.RandomLease = randomLease
+ cfg.RandomLease = randomLease || cfg.RandomLease
if noRandomAmount {
if randomAmount {
reportErrorf("Error --ra and --nra can't both be specified\n")
}
cfg.RandomizeAmt = false
}
- cfg.RandomizeDst = randomDst
- cfg.Quiet = quietish
+ cfg.RandomizeDst = randomDst || cfg.RandomizeDst
+ cfg.Quiet = quietish || cfg.Quiet
if runTime != "" {
val, err := strconv.ParseUint(runTime, 10, 32)
if err != nil {
@@ -263,7 +305,7 @@ var runCmd = &cobra.Command{
}
if logicProg != "" {
- cfg.Program, err = ioutil.ReadFile(logicProg)
+ cfg.Program, err = os.ReadFile(logicProg)
if err != nil {
reportErrorf("Error opening logic program: %v\n", err)
}
@@ -275,17 +317,27 @@ var runCmd = &cobra.Command{
reportErrorf("Invalid group size: %v\n", groupSize)
}
- if numAsset <= 1000 {
+ if numAsset == 0 {
+ // nop
+ } else if numAsset <= 1000 {
cfg.NumAsset = numAsset
} else {
reportErrorf("Invalid number of assets: %d, (valid number: 0 - 1000)\n", numAsset)
}
- cfg.AppProgOps = appProgOps
- cfg.AppProgHashes = appProgHashes
- cfg.AppProgHashSize = appProgHashSize
+ if appProgOps != 0 {
+ cfg.AppProgOps = appProgOps
+ }
+ if appProgHashes != 0 {
+ cfg.AppProgHashes = appProgHashes
+ }
+ if appProgHashSize != "sha256" {
+ cfg.AppProgHashSize = appProgHashSize
+ }
- if numApp <= 1000 {
+ if numApp == 0 {
+ // nop
+ } else if numApp <= 1000 {
cfg.NumApp = numApp
} else {
reportErrorf("Invalid number of apps: %d, (valid number: 0 - 1000)\n", numApp)
@@ -295,7 +347,9 @@ var runCmd = &cobra.Command{
reportErrorf("Cannot opt in %d times of %d total apps\n", numAppOptIn, numApp)
}
- cfg.NumAppOptIn = numAppOptIn
+ if numAppOptIn != 0 {
+ cfg.NumAppOptIn = numAppOptIn
+ }
if appProgGlobKeys > 0 {
cfg.AppGlobKeys = appProgGlobKeys
@@ -304,10 +358,6 @@ var runCmd = &cobra.Command{
cfg.AppLocalKeys = appProgLocalKeys
}
- if numAsset != 0 && numApp != 0 {
- reportErrorf("only one of numapp and numasset may be specified\n")
- }
-
if rekey {
cfg.Rekey = rekey
if !cfg.RandomLease && !cfg.RandomNote && !cfg.RandomizeFee && !cfg.RandomizeAmt {
@@ -318,7 +368,32 @@ var runCmd = &cobra.Command{
}
}
- cfg.NftAsaPerSecond = nftAsaPerSecond
+ if nftAsaPerSecond != 0 {
+ cfg.NftAsaPerSecond = nftAsaPerSecond
+ }
+
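+	// deterministic key generation: these flags are interdependent, so validate the combination first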
+ if deterministicKeys && generatedAccountsCount == 0 {
+ reportErrorf("deterministicKeys requires setting generatedAccountsCount")
+ }
+ if !deterministicKeys && generatedAccountsCount > 0 {
+ reportErrorf("generatedAccountsCount requires deterministicKeys=true")
+ }
+ if deterministicKeys && numAccounts > generatedAccountsCount {
+ reportErrorf("numAccounts must be <= generatedAccountsCount")
+ }
+ cfg.DeterministicKeys = deterministicKeys || cfg.DeterministicKeys
+ if generatedAccountsCount != 0 {
+ cfg.GeneratedAccountsCount = generatedAccountsCount
+ }
+ if generatedAccountSampleMethod != "" {
+ cfg.GeneratedAccountSampleMethod = generatedAccountSampleMethod
+ }
+
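+	// apply default weights for anything left unset, then validate the assembled config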
+ cfg.SetDefaultWeights()
+ err = cfg.Check()
+ if err != nil {
+ reportErrorf("%v", err)
+ }
reportInfof("Preparing to initialize PingPong with config:\n")
cfg.Dump(os.Stdout)
@@ -326,20 +401,23 @@ var runCmd = &cobra.Command{
pps := pingpong.NewPingpong(cfg)
// Initialize accounts if necessary
- err = pps.PrepareAccounts(ac)
+ err = pps.PrepareAccounts(&ac)
if err != nil {
reportErrorf("Error preparing accounts for transfers: %v\n", err)
}
if saveConfig {
- cfg.Save(cfgPath)
+ err = cfg.Save(dataDirCfgPath)
+ if err != nil {
+ reportErrorf("%s: could not save config, %v\n", dataDirCfgPath, err)
+ }
}
reportInfof("Preparing to run PingPong with config:\n")
cfg.Dump(os.Stdout)
// Kick off the real processing
- pps.RunPingPong(context.Background(), ac)
+ pps.RunPingPong(context.Background(), &ac)
},
}
diff --git a/cmd/tealdbg/local.go b/cmd/tealdbg/local.go
index c9cba4de39..fa3c5d6fc5 100644
--- a/cmd/tealdbg/local.go
+++ b/cmd/tealdbg/local.go
@@ -73,7 +73,7 @@ func txnGroupFromParams(dp *DebugParams) (txnGroup []transactions.SignedTxn, err
}
// 3. Attempt msgp - array of transactions
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
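+	// decode SignedTxn values one at a time until the buffer is exhausted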
for {
var txn transactions.SignedTxn
err = dec.Decode(&txn)
@@ -124,7 +124,7 @@ func balanceRecordsFromParams(dp *DebugParams) (records []basics.BalanceRecord,
}
// 3. Attempt msgp - an array of records
- dec := protocol.NewDecoderBytes(data)
+ dec := protocol.NewMsgpDecoderBytes(data)
for {
var record basics.BalanceRecord
err = dec.Decode(&record)
diff --git a/cmd/tealdbg/localLedger.go b/cmd/tealdbg/localLedger.go
index cce2ae75d6..c0a6cd7236 100644
--- a/cmd/tealdbg/localLedger.go
+++ b/cmd/tealdbg/localLedger.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"math/rand"
"net/http"
@@ -198,7 +198,7 @@ func getAppCreatorFromIndexer(indexerURL string, indexerToken string, app basics
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
- msg, _ := ioutil.ReadAll(resp.Body)
+ msg, _ := io.ReadAll(resp.Body)
return basics.Address{}, fmt.Errorf("application response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
}
var appResp ApplicationIndexerResponse
@@ -229,7 +229,7 @@ func getBalanceFromIndexer(indexerURL string, indexerToken string, account basic
}
defer resp.Body.Close()
if resp.StatusCode != 200 {
- msg, _ := ioutil.ReadAll(resp.Body)
+ msg, _ := io.ReadAll(resp.Body)
return basics.AccountData{}, fmt.Errorf("account response error: %s, status code: %d, request: %s", string(msg), resp.StatusCode, queryString)
}
var accountResp AccountIndexerResponse
diff --git a/cmd/tealdbg/main.go b/cmd/tealdbg/main.go
index 9ab3d3dec0..75e437c149 100644
--- a/cmd/tealdbg/main.go
+++ b/cmd/tealdbg/main.go
@@ -17,7 +17,6 @@
package main
import (
- "io/ioutil"
"log"
"os"
@@ -205,7 +204,7 @@ func debugLocal(args []string) {
programNames = make([]string, len(args))
programBlobs = make([][]byte, len(args))
for i, file := range args {
- data, err := ioutil.ReadFile(file)
+ data, err := os.ReadFile(file)
if err != nil {
log.Fatalf("Error program reading %s: %s", file, err)
}
@@ -217,7 +216,7 @@ func debugLocal(args []string) {
var err error
var txnBlob []byte
if len(txnFile) > 0 {
- txnBlob, err = ioutil.ReadFile(txnFile)
+ txnBlob, err = os.ReadFile(txnFile)
if err != nil {
log.Fatalf("Error txn reading %s: %s", txnFile, err)
}
@@ -225,7 +224,7 @@ func debugLocal(args []string) {
var balanceBlob []byte
if len(balanceFile) > 0 {
- balanceBlob, err = ioutil.ReadFile(balanceFile)
+ balanceBlob, err = os.ReadFile(balanceFile)
if err != nil {
log.Fatalf("Error balance reading %s: %s", balanceFile, err)
}
@@ -233,7 +232,7 @@ func debugLocal(args []string) {
var ddrBlob []byte
if len(ddrFile) > 0 {
- ddrBlob, err = ioutil.ReadFile(ddrFile)
+ ddrBlob, err = os.ReadFile(ddrFile)
if err != nil {
log.Fatalf("Error dryrun-dump reading %s: %s", ddrFile, err)
}
diff --git a/cmd/updater/update.sh b/cmd/updater/update.sh
index b613cfc32c..1da213ccf0 100755
--- a/cmd/updater/update.sh
+++ b/cmd/updater/update.sh
@@ -272,7 +272,7 @@ function check_for_updater() {
# try signature validation
if [ "$GPG_VERIFY" = "1" ]; then
- local UPDATER_SIGFILE="$UPDATER_TEMPDIR/updater.sig" UPDATER_PUBKEYFILE="key.pub"
+ local UPDATER_SIGFILE="$UPDATER_TEMPDIR/updater.sig" UPDATER_PUBKEYFILE="$UPDATER_TEMPDIR/key.pub"
# try downloading public key
if curl -sSL "$UPDATER_PUBKEYURL" -o "$UPDATER_PUBKEYFILE"; then
GNUPGHOME="$(mktemp -d)"; export GNUPGHOME
diff --git a/components/mocks/mockCatchpointCatchupAccessor.go b/components/mocks/mockCatchpointCatchupAccessor.go
index bd55d29e9b..c92113d70d 100644
--- a/components/mocks/mockCatchpointCatchupAccessor.go
+++ b/components/mocks/mockCatchpointCatchupAccessor.go
@@ -103,3 +103,8 @@ func (m *MockCatchpointCatchupAccessor) EnsureFirstBlock(ctx context.Context) (b
func (m *MockCatchpointCatchupAccessor) CompleteCatchup(ctx context.Context) (err error) {
return nil
}
+
+// Ledger returns ledger instance as CatchupAccessorClientLedger interface
+func (m *MockCatchpointCatchupAccessor) Ledger() (l ledger.CatchupAccessorClientLedger) {
+ return nil
+}
diff --git a/config/config.go b/config/config.go
index 023561f683..8e8dc4c276 100644
--- a/config/config.go
+++ b/config/config.go
@@ -34,6 +34,9 @@ const Devnet protocol.NetworkID = "devnet"
// Betanet identifies the 'beta network' use for early releases of feature to the public prior to releasing these to mainnet/testnet
const Betanet protocol.NetworkID = "betanet"
+// Alphanet identifies the 'alpha network' used for performance releases of feature/alphanet to the public prior to releasing these to mainnet/testnet
+const Alphanet protocol.NetworkID = "alphanet"
+
// Devtestnet identifies the 'development network for tests' use for running tests against development and not generally accessible publicly
const Devtestnet protocol.NetworkID = "devtestnet"
diff --git a/config/config_test.go b/config/config_test.go
index c11edd3797..1e1915faa3 100644
--- a/config/config_test.go
+++ b/config/config_test.go
@@ -20,12 +20,13 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
+ "strings"
"testing"
+ "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/protocol"
@@ -245,7 +246,7 @@ func TestConfigExampleIsCorrect(t *testing.T) {
// see their default (zero) values and instead see the
// new default because they won't exist in the old file.
func loadWithoutDefaults(cfg Local) (Local, error) {
- file, err := ioutil.TempFile("", "lwd")
+ file, err := os.CreateTemp("", "lwd")
if err != nil {
return Local{}, err
}
@@ -345,21 +346,35 @@ func TestConsensusUpgrades(t *testing.T) {
currentVersionName := protocol.ConsensusV7
latestVersionName := protocol.ConsensusCurrentVersion
- leadsTo := consensusUpgradesTo(a, currentVersionName, latestVersionName)
+ leadsTo := consensusUpgradesTo(a, currentVersionName, latestVersionName, checkConsensusVersionName)
a.True(leadsTo, "Consensus protocol must have upgrade path from %v to %v", currentVersionName, latestVersionName)
}
-func consensusUpgradesTo(a *require.Assertions, currentName, targetName protocol.ConsensusVersion) bool {
+func checkConsensusVersionName(a *require.Assertions, name string) {
+ // ensure versions come from official specs repo
+ prefix1 := "https://github.com/algorandfoundation/specs/tree/"
+ prefix2 := "https://github.com/algorand/spec/tree/"
+
+ whitelist := map[string]bool{"v7": true, "v8": true, "v9": true, "v10": true, "v11": true, "v12": true}
+ if !whitelist[name] {
+ a.True(strings.HasPrefix(name, prefix1) || strings.HasPrefix(name, prefix2),
+ "Consensus version %s does not start with allowed prefix", name)
+ }
+}
+
+func consensusUpgradesTo(a *require.Assertions, currentName, targetName protocol.ConsensusVersion, nameCheckFn func(*require.Assertions, string)) bool {
+ nameCheckFn(a, string(currentName))
if currentName == targetName {
return true
}
currentVersion, has := Consensus[currentName]
a.True(has, "Consensus map should contain all references consensus versions: Missing '%v'", currentName)
for upgrade := range currentVersion.ApprovedUpgrades {
+ nameCheckFn(a, string(upgrade))
if upgrade == targetName {
return true
}
- return consensusUpgradesTo(a, upgrade, targetName)
+ return consensusUpgradesTo(a, upgrade, targetName, nameCheckFn)
}
return false
}
@@ -537,3 +552,31 @@ func TestLocalVersionField(t *testing.T) {
expectedTag = expectedTag[:len(expectedTag)-1]
require.Equal(t, expectedTag, string(field.Tag))
}
+
+func TestGetNonDefaultConfigValues(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ cfg := GetDefaultLocal()
+
+ // set 4 non-default values
+ cfg.AgreementIncomingBundlesQueueLength = 2
+ cfg.AgreementIncomingProposalsQueueLength = 200
+ cfg.TxPoolSize = 30
+ cfg.Archival = true
+
+ // ask for 2 of them
+ ndmap := GetNonDefaultConfigValues(cfg, []string{"AgreementIncomingBundlesQueueLength", "TxPoolSize"})
+
+ // assert correct
+ expected := map[string]interface{}{
+ "AgreementIncomingBundlesQueueLength": uint64(2),
+ "TxPoolSize": int(30),
+ }
+ assert.Equal(t, expected, ndmap)
+
+ // ask for field that doesn't exist: should skip
+ assert.Equal(t, expected, GetNonDefaultConfigValues(cfg, []string{"Blah", "AgreementIncomingBundlesQueueLength", "TxPoolSize"}))
+
+ // check unmodified defaults
+ assert.Empty(t, GetNonDefaultConfigValues(GetDefaultLocal(), []string{"AgreementIncomingBundlesQueueLength", "TxPoolSize"}))
+}
diff --git a/config/consensus.go b/config/consensus.go
index 9e95d76ec8..71b54daa7f 100644
--- a/config/consensus.go
+++ b/config/consensus.go
@@ -18,7 +18,6 @@ package config
import (
"encoding/json"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -599,7 +598,7 @@ func SaveConfigurableConsensus(dataDirectory string, params ConsensusProtocols)
if err != nil {
return err
}
- err = ioutil.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
+ err = os.WriteFile(consensusProtocolPath, encodedConsensusParams, 0644)
return err
}
@@ -1217,6 +1216,31 @@ func initConsensusProtocols() {
vFuture.LogicSigVersion = 8 // When moving this to a release, put a new higher LogicSigVersion here
Consensus[protocol.ConsensusFuture] = vFuture
+
+	// vAlphaX versions are a separate series of consensus parameters and versions for alphanet
+ vAlpha1 := v32
+ vAlpha1.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha1.AgreementFilterTimeoutPeriod0 = 2 * time.Second
+ vAlpha1.MaxTxnBytesPerBlock = 5000000
+ Consensus[protocol.ConsensusVAlpha1] = vAlpha1
+
+ vAlpha2 := vAlpha1
+ vAlpha2.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ vAlpha2.AgreementFilterTimeoutPeriod0 = 3500 * time.Millisecond
+ vAlpha2.MaxTxnBytesPerBlock = 5 * 1024 * 1024
+ Consensus[protocol.ConsensusVAlpha2] = vAlpha2
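+	// approve the vAlpha1 -> vAlpha2 upgrade with a 10000-round delay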
+ vAlpha1.ApprovedUpgrades[protocol.ConsensusVAlpha2] = 10000
+
+ // vAlpha3 and vAlpha4 use the same parameters as v33 and v34
+ vAlpha3 := v33
+ vAlpha3.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha3] = vAlpha3
+ vAlpha2.ApprovedUpgrades[protocol.ConsensusVAlpha3] = 10000
+
+ vAlpha4 := v34
+ vAlpha4.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{}
+ Consensus[protocol.ConsensusVAlpha4] = vAlpha4
+ vAlpha3.ApprovedUpgrades[protocol.ConsensusVAlpha4] = 10000
}
// Global defines global Algorand protocol parameters which should not be overridden.
diff --git a/config/defaultsGenerator/defaultsGenerator.go b/config/defaultsGenerator/defaultsGenerator.go
index 47b5ac51e7..4ee4a1671a 100644
--- a/config/defaultsGenerator/defaultsGenerator.go
+++ b/config/defaultsGenerator/defaultsGenerator.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -56,7 +55,7 @@ func main() {
printExit("one or more of the required input arguments was not provided\n")
}
- localDefaultsBytes, err := ioutil.ReadFile(*headerFileName)
+ localDefaultsBytes, err := os.ReadFile(*headerFileName)
if err != nil {
printExit("Unable to load file %s : %v", *headerFileName, err)
}
@@ -70,14 +69,14 @@ func main() {
localDefaultsBytes = append(localDefaultsBytes, autoDefaultsBytes...)
- err = ioutil.WriteFile(*outputfilename, localDefaultsBytes, 0644)
+ err = os.WriteFile(*outputfilename, localDefaultsBytes, 0644)
if err != nil {
printExit("Unable to write file %s : %v", *outputfilename, err)
}
// generate an update json for the example as well.
autoDefaultsBytes = []byte(prettyPrint(config.AutogenLocal, "json"))
- err = ioutil.WriteFile(*jsonExampleFileName, autoDefaultsBytes, 0644)
+ err = os.WriteFile(*jsonExampleFileName, autoDefaultsBytes, 0644)
if err != nil {
printExit("Unable to write file %s : %v", *jsonExampleFileName, err)
}
diff --git a/config/localTemplate.go b/config/localTemplate.go
index 8a8120c5f9..c5535e793d 100644
--- a/config/localTemplate.go
+++ b/config/localTemplate.go
@@ -477,6 +477,8 @@ func (cfg Local) DNSBootstrap(network protocol.NetworkID) string {
return "devnet.algodev.network"
} else if network == Betanet {
return "betanet.algodev.network"
+ } else if network == Alphanet {
+ return "alphanet.algodev.network"
}
}
return strings.Replace(cfg.DNSBootstrapID, "<network>", string(network), -1)
diff --git a/config/migrate.go b/config/migrate.go
index 9fd9c86076..405314c628 100644
--- a/config/migrate.go
+++ b/config/migrate.go
@@ -198,3 +198,25 @@ func getVersionedDefaultLocalConfig(version uint32) (local Local) {
}
return
}
+
+// GetNonDefaultConfigValues takes a provided cfg and list of field names, and returns a map of all values in cfg
+// that are not set to the default for the latest version.
+func GetNonDefaultConfigValues(cfg Local, fieldNames []string) map[string]interface{} {
+ defCfg := GetDefaultLocal()
+ ret := make(map[string]interface{})
+
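+	// look up each requested field by name via reflection; names not present in Local are skipped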
+ for _, fieldName := range fieldNames {
+ defField := reflect.ValueOf(defCfg).FieldByName(fieldName)
+ if !defField.IsValid() {
+ continue
+ }
+ cfgField := reflect.ValueOf(cfg).FieldByName(fieldName)
+ if !cfgField.IsValid() {
+ continue
+ }
+ if !reflect.DeepEqual(defField.Interface(), cfgField.Interface()) {
+ ret[fieldName] = cfgField.Interface()
+ }
+ }
+ return ret
+}
diff --git a/config/version.go b/config/version.go
index 37752c90e8..20743216eb 100644
--- a/config/version.go
+++ b/config/version.go
@@ -33,7 +33,7 @@ const VersionMajor = 3
// VersionMinor is the Minor semantic version number (x.#.z) - changed when backwards-compatible features are introduced.
// Not enforced until after initial public release (x > 0).
-const VersionMinor = 9
+const VersionMinor = 10
// Version is the type holding our full version information.
type Version struct {
diff --git a/crypto/merklesignature/const.go b/crypto/merklesignature/const.go
index c98321b514..767f14aaef 100644
--- a/crypto/merklesignature/const.go
+++ b/crypto/merklesignature/const.go
@@ -18,6 +18,7 @@ package merklesignature
import (
"fmt"
+
"github.com/algorand/go-algorand/crypto"
)
@@ -40,7 +41,7 @@ const (
var NoKeysCommitment = Commitment{}
func init() {
- // no keys generated, inner tree of merkle siganture scheme is empty.
+ // no keys generated, inner tree of merkle signature scheme is empty.
o, err := New(KeyLifetimeDefault+1, KeyLifetimeDefault+2, KeyLifetimeDefault)
if err != nil {
panic(fmt.Errorf("initializing empty merkle signature scheme failed, err: %w", err))
diff --git a/crypto/stateproof/coinGenerator.go b/crypto/stateproof/coinGenerator.go
index 320232fbaa..fa88c57706 100644
--- a/crypto/stateproof/coinGenerator.go
+++ b/crypto/stateproof/coinGenerator.go
@@ -18,9 +18,10 @@ package stateproof
import (
"encoding/binary"
- "golang.org/x/crypto/sha3"
"math/big"
+ "golang.org/x/crypto/sha3"
+
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/protocol"
)
@@ -75,7 +76,7 @@ func makeCoinGenerator(choice *coinChoiceSeed) coinGenerator {
choice.version = VersionForCoinGenerator
rep := crypto.HashRep(choice)
shk := sha3.NewShake256()
- shk.Write(rep)
+ shk.Write(rep) //nolint:errcheck // ShakeHash.Write may panic, but does not return error
threshold := prepareRejectionSamplingThreshold(choice.signedWeight)
return coinGenerator{shkContext: shk, signedWeight: choice.signedWeight, threshold: threshold}
@@ -111,7 +112,7 @@ func (cg *coinGenerator) getNextCoin() uint64 {
var randNumFromXof uint64
for {
var shakeDigest [8]byte
- cg.shkContext.Read(shakeDigest[:])
+ cg.shkContext.Read(shakeDigest[:]) //nolint:errcheck // ShakeHash.Read never returns error
randNumFromXof = binary.LittleEndian.Uint64(shakeDigest[:])
z := &big.Int{}
diff --git a/daemon/algod/api/algod2.oas2.json b/daemon/algod/api/algod2.oas2.json
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/daemon/algod/api/client/restClient.go b/daemon/algod/api/client/restClient.go
index b5c3097565..b5d25816d1 100644
--- a/daemon/algod/api/client/restClient.go
+++ b/daemon/algod/api/client/restClient.go
@@ -23,7 +23,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"net/http"
"net/url"
"strings"
@@ -135,7 +134,7 @@ func extractError(resp *http.Response) error {
return nil
}
- errorBuf, _ := ioutil.ReadAll(resp.Body) // ignore returned error
+ errorBuf, _ := io.ReadAll(resp.Body) // ignore returned error
errorString := filterASCII(string(errorBuf))
if resp.StatusCode == http.StatusUnauthorized {
@@ -221,7 +220,7 @@ func (client RestClient) submitForm(response interface{}, path string, request i
return fmt.Errorf("can only decode raw response into type implementing v1.RawResponse")
}
- bodyBytes, err := ioutil.ReadAll(resp.Body)
+ bodyBytes, err := io.ReadAll(resp.Body)
if err != nil {
return err
}
@@ -638,7 +637,7 @@ func (client RestClient) doGetWithQuery(ctx context.Context, path string, queryA
return
}
- bytes, err := ioutil.ReadAll(resp.Body)
+ bytes, err := io.ReadAll(resp.Body)
if err != nil {
return
}
diff --git a/daemon/algod/api/server/v2/handlers.go b/daemon/algod/api/server/v2/handlers.go
index 5898c2835b..353cb0335f 100644
--- a/daemon/algod/api/server/v2/handlers.go
+++ b/daemon/algod/api/server/v2/handlers.go
@@ -589,7 +589,12 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
if handle == protocol.CodecHandle {
blockbytes, err := rpcs.RawBlockBytes(v2.Node.LedgerForAPI(), basics.Round(round))
if err != nil {
- return internalError(ctx, err, err.Error(), v2.Log)
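+			// a round missing from the ledger is a client error, not a server fault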
+ switch err.(type) {
+ case ledgercore.ErrNoEntry:
+ return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ default:
+ return internalError(ctx, err, err.Error(), v2.Log)
+ }
}
ctx.Response().Writer.Header().Add("X-Algorand-Struct", "block-v1")
@@ -599,7 +604,12 @@ func (v2 *Handlers) GetBlock(ctx echo.Context, round uint64, params generated.Ge
ledger := v2.Node.LedgerForAPI()
block, _, err := ledger.BlockCert(basics.Round(round))
if err != nil {
- return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ switch err.(type) {
+ case ledgercore.ErrNoEntry:
+ return notFound(ctx, err, errFailedLookingUpLedger, v2.Log)
+ default:
+ return internalError(ctx, err, errFailedLookingUpLedger, v2.Log)
+ }
}
// Encoding wasn't working well without embedding "real" objects.
diff --git a/daemon/algod/api/server/v2/test/handlers_test.go b/daemon/algod/api/server/v2/test/handlers_test.go
index ad3d2ee69c..b587f75308 100644
--- a/daemon/algod/api/server/v2/test/handlers_test.go
+++ b/daemon/algod/api/server/v2/test/handlers_test.go
@@ -122,7 +122,8 @@ func TestGetBlock(t *testing.T) {
getBlockTest(t, 0, "json", 200)
getBlockTest(t, 0, "msgpack", 200)
- getBlockTest(t, 1, "json", 500)
+ getBlockTest(t, 1, "json", 404)
+ getBlockTest(t, 1, "msgpack", 404)
getBlockTest(t, 0, "bad format", 400)
}
diff --git a/daemon/algod/server.go b/daemon/algod/server.go
index c423e8de2d..4f1ce35158 100644
--- a/daemon/algod/server.go
+++ b/daemon/algod/server.go
@@ -21,7 +21,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
_ "net/http/pprof" // net/http/pprof is for registering the pprof URLs with the web server, so http://localhost:8080/debug/pprof/ works.
@@ -264,13 +263,25 @@ func (s *Server) Start() {
// quit earlier than these service files get created
s.pidFile = filepath.Join(s.RootPath, "algod.pid")
s.netFile = filepath.Join(s.RootPath, "algod.net")
- ioutil.WriteFile(s.pidFile, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0644)
- ioutil.WriteFile(s.netFile, []byte(fmt.Sprintf("%s\n", addr)), 0644)
+ err = os.WriteFile(s.pidFile, []byte(fmt.Sprintf("%d\n", os.Getpid())), 0644)
+ if err != nil {
+ fmt.Printf("pidfile error: %v\n", err)
+ os.Exit(1)
+ }
+ err = os.WriteFile(s.netFile, []byte(fmt.Sprintf("%s\n", addr)), 0644)
+ if err != nil {
+ fmt.Printf("netfile error: %v\n", err)
+ os.Exit(1)
+ }
listenAddr, listening := s.node.ListeningAddress()
if listening {
s.netListenFile = filepath.Join(s.RootPath, "algod-listen.net")
- ioutil.WriteFile(s.netListenFile, []byte(fmt.Sprintf("%s\n", listenAddr)), 0644)
+ err = os.WriteFile(s.netListenFile, []byte(fmt.Sprintf("%s\n", listenAddr)), 0644)
+ if err != nil {
+ fmt.Printf("netlistenfile error: %v\n", err)
+ os.Exit(1)
+ }
}
errChan := make(chan error, 1)
diff --git a/daemon/kmd/config/config.go b/daemon/kmd/config/config.go
index 9b932d2162..95a03fe0d7 100644
--- a/daemon/kmd/config/config.go
+++ b/daemon/kmd/config/config.go
@@ -18,7 +18,7 @@ package config
import (
"encoding/json"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/algorand/go-algorand/util/codecs"
@@ -103,7 +103,7 @@ func (k KMDConfig) Validate() error {
func LoadKMDConfig(dataDir string) (cfg KMDConfig, err error) {
cfg = defaultConfig(dataDir)
configFilename := filepath.Join(dataDir, kmdConfigFilename)
- dat, err := ioutil.ReadFile(configFilename)
+ dat, err := os.ReadFile(configFilename)
// If there is no config file, then return the default configuration, and dump the default config to disk
if err != nil {
exampleFilename := filepath.Join(dataDir, kmdConfigExampleFilename)
diff --git a/daemon/kmd/server/server.go b/daemon/kmd/server/server.go
index 973df186b3..b36c2859c3 100644
--- a/daemon/kmd/server/server.go
+++ b/daemon/kmd/server/server.go
@@ -19,7 +19,6 @@ package server
import (
"context"
"fmt"
- "io/ioutil"
"net"
"net/http"
"os"
@@ -144,12 +143,12 @@ func (ws *WalletServer) releaseFileLock() error {
// Write out a file containing the address kmd is listening on
func (ws *WalletServer) writeStateFiles(netAddr string) (err error) {
// netPath file contains path to sock file
- err = ioutil.WriteFile(ws.netPath, []byte(netAddr), 0640)
+ err = os.WriteFile(ws.netPath, []byte(netAddr), 0640)
if err != nil {
return
}
// pidPath file contains current process ID
- err = ioutil.WriteFile(ws.pidPath, []byte(fmt.Sprintf("%d", os.Getpid())), 0640)
+ err = os.WriteFile(ws.pidPath, []byte(fmt.Sprintf("%d", os.Getpid())), 0640)
return
}
diff --git a/daemon/kmd/wallet/driver/sqlite.go b/daemon/kmd/wallet/driver/sqlite.go
index eb78f4a77b..8ad659c282 100644
--- a/daemon/kmd/wallet/driver/sqlite.go
+++ b/daemon/kmd/wallet/driver/sqlite.go
@@ -20,7 +20,6 @@ import (
"bytes"
"crypto/subtle"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"regexp"
@@ -231,7 +230,7 @@ func walletMetadataFromDBPath(dbPath string) (metadata wallet.Metadata, err erro
func (swd *SQLiteWalletDriver) potentialWalletPaths() (paths []string, err error) {
// List all files and folders in the wallets directory
wDir := swd.walletsDir()
- files, err := ioutil.ReadDir(wDir)
+ files, err := os.ReadDir(wDir)
if err != nil {
return
}
diff --git a/data/abi/abi_encode.go b/data/abi/abi_encode.go
deleted file mode 100644
index b0f8b6c12e..0000000000
--- a/data/abi/abi_encode.go
+++ /dev/null
@@ -1,617 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "encoding/binary"
- "encoding/json"
- "fmt"
- "math/big"
- "reflect"
- "strings"
-)
-
-// typeCastToTuple cast an array-like ABI type into an ABI tuple type.
-func (t Type) typeCastToTuple(tupLen ...int) (Type, error) {
- var childT []Type
-
- switch t.abiTypeID {
- case String:
- if len(tupLen) != 1 {
- return Type{}, fmt.Errorf("string type conversion to tuple need 1 length argument")
- }
- childT = make([]Type, tupLen[0])
- for i := 0; i < tupLen[0]; i++ {
- childT[i] = byteType
- }
- case Address:
- childT = make([]Type, addressByteSize)
- for i := 0; i < addressByteSize; i++ {
- childT[i] = byteType
- }
- case ArrayStatic:
- childT = make([]Type, t.staticLength)
- for i := 0; i < int(t.staticLength); i++ {
- childT[i] = t.childTypes[0]
- }
- case ArrayDynamic:
- if len(tupLen) != 1 {
- return Type{}, fmt.Errorf("dynamic array type conversion to tuple need 1 length argument")
- }
- childT = make([]Type, tupLen[0])
- for i := 0; i < tupLen[0]; i++ {
- childT[i] = t.childTypes[0]
- }
- default:
- return Type{}, fmt.Errorf("type cannot support conversion to tuple")
- }
-
- tuple, err := MakeTupleType(childT)
- if err != nil {
- return Type{}, err
- }
- return tuple, nil
-}
-
-// Encode is an ABI type method to encode go values into bytes following ABI encoding rules
-func (t Type) Encode(value interface{}) ([]byte, error) {
- switch t.abiTypeID {
- case Uint, Ufixed:
- return encodeInt(value, t.bitSize)
- case Bool:
- boolValue, ok := value.(bool)
- if !ok {
- return nil, fmt.Errorf("cannot cast value to bool in bool encoding")
- }
- if boolValue {
- return []byte{0x80}, nil
- }
- return []byte{0x00}, nil
- case Byte:
- byteValue, ok := value.(byte)
- if !ok {
- return nil, fmt.Errorf("cannot cast value to byte in byte encoding")
- }
- return []byte{byteValue}, nil
- case ArrayStatic, Address:
- castedType, err := t.typeCastToTuple()
- if err != nil {
- return nil, err
- }
- return castedType.Encode(value)
- case ArrayDynamic:
- dynamicArray, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- castedType, err := t.typeCastToTuple(len(dynamicArray))
- if err != nil {
- return nil, err
- }
- lengthEncode := make([]byte, lengthEncodeByteSize)
- binary.BigEndian.PutUint16(lengthEncode, uint16(len(dynamicArray)))
- encoded, err := castedType.Encode(value)
- if err != nil {
- return nil, err
- }
- encoded = append(lengthEncode, encoded...)
- return encoded, nil
- case String:
- stringValue, okString := value.(string)
- if !okString {
- return nil, fmt.Errorf("cannot cast value to string or array dynamic in encoding")
- }
- byteValue := []byte(stringValue)
- castedType, err := t.typeCastToTuple(len(byteValue))
- if err != nil {
- return nil, err
- }
- lengthEncode := make([]byte, lengthEncodeByteSize)
- binary.BigEndian.PutUint16(lengthEncode, uint16(len(byteValue)))
- encoded, err := castedType.Encode(byteValue)
- if err != nil {
- return nil, err
- }
- encoded = append(lengthEncode, encoded...)
- return encoded, nil
- case Tuple:
- return encodeTuple(value, t.childTypes)
- default:
- return nil, fmt.Errorf("cannot infer type for encoding")
- }
-}
-
-// encodeInt encodes int-alike golang values to bytes, following ABI encoding rules
-func encodeInt(intValue interface{}, bitSize uint16) ([]byte, error) {
- var bigInt *big.Int
-
- switch intValue := intValue.(type) {
- case int8:
- bigInt = big.NewInt(int64(intValue))
- case uint8:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int16:
- bigInt = big.NewInt(int64(intValue))
- case uint16:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int32:
- bigInt = big.NewInt(int64(intValue))
- case uint32:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int64:
- bigInt = big.NewInt(intValue)
- case uint64:
- bigInt = new(big.Int).SetUint64(intValue)
- case uint:
- bigInt = new(big.Int).SetUint64(uint64(intValue))
- case int:
- bigInt = big.NewInt(int64(intValue))
- case *big.Int:
- bigInt = intValue
- default:
- return nil, fmt.Errorf("cannot infer go type for uint encode")
- }
-
- if bigInt.Sign() < 0 {
- return nil, fmt.Errorf("passed in numeric value should be non negative")
- }
-
- castedBytes := make([]byte, bitSize/8)
-
- if bigInt.Cmp(new(big.Int).Lsh(big.NewInt(1), uint(bitSize))) >= 0 {
- return nil, fmt.Errorf("input value bit size %d > abi type bit size %d", bigInt.BitLen(), bitSize)
- }
-
- bigInt.FillBytes(castedBytes)
- return castedBytes, nil
-}
-
-// inferToSlice infers an interface element to a slice of interface{}, returns error if it cannot infer successfully
-func inferToSlice(value interface{}) ([]interface{}, error) {
- reflectVal := reflect.ValueOf(value)
- if reflectVal.Kind() != reflect.Slice && reflectVal.Kind() != reflect.Array {
- return nil, fmt.Errorf("cannot infer an interface value as a slice of interface element")
- }
- // * if input is a slice, with nil, then reflectVal.Len() == 0
- // * if input is an array, it is not possible it is nil
- values := make([]interface{}, reflectVal.Len())
- for i := 0; i < reflectVal.Len(); i++ {
- values[i] = reflectVal.Index(i).Interface()
- }
- return values, nil
-}
-
-// encodeTuple encodes slice-of-interface of golang values to bytes, following ABI encoding rules
-func encodeTuple(value interface{}, childT []Type) ([]byte, error) {
- if len(childT) >= abiEncodingLengthLimit {
- return nil, fmt.Errorf("abi child type number exceeds uint16 maximum")
- }
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if len(values) != len(childT) {
- return nil, fmt.Errorf("cannot encode abi tuple: value slice length != child type number")
- }
-
- // for each tuple element value, it has a head/tail component
- // we create slots for head/tail bytes now, store them and concat them later
- heads := make([][]byte, len(childT))
- tails := make([][]byte, len(childT))
- isDynamicIndex := make(map[int]bool)
-
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- // if it is a dynamic value, the head component is not pre-determined
- // we store an empty placeholder first, since we will need it in byte length calculation
- headsPlaceholder := []byte{0x00, 0x00}
- heads[i] = headsPlaceholder
- // we keep track that the index points to a dynamic value
- isDynamicIndex[i] = true
- tailEncoding, err := childT[i].Encode(values[i])
- if err != nil {
- return nil, err
- }
- tails[i] = tailEncoding
- isDynamicIndex[i] = true
- } else if childT[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(childT, i, -1)
- // search after bool
- after := findBoolLR(childT, i, 1)
- // append to heads and tails
- if before%8 != 0 {
- return nil, fmt.Errorf("cannot encode abi tuple: expected before has number of bool mod 8 == 0")
- }
- if after > 7 {
- after = 7
- }
- compressed, err := compressBools(values[i : i+after+1])
- if err != nil {
- return nil, err
- }
- heads[i] = []byte{compressed}
- i += after
- isDynamicIndex[i] = false
- } else {
- encodeTi, err := childT[i].Encode(values[i])
- if err != nil {
- return nil, err
- }
- heads[i] = encodeTi
- isDynamicIndex[i] = false
- }
- }
-
- // adjust heads for dynamic type
- // since head size can be pre-determined (for we are storing static value and dynamic value index in head)
- // we accumulate the head size first
- // (also note that though head size is pre-determined, head value is not necessarily pre-determined)
- headLength := 0
- for _, headTi := range heads {
- headLength += len(headTi)
- }
-
- // when we iterate through the heads (byte slice), we need to find heads for dynamic values
- // the head should correspond to the start index: len( head(x[1]) ... head(x[N]) tail(x[1]) ... tail(x[i-1]) ).
- tailCurrLength := 0
- for i := 0; i < len(heads); i++ {
- if isDynamicIndex[i] {
- // calculate where the index of dynamic value encoding byte start
- headValue := headLength + tailCurrLength
- if headValue >= abiEncodingLengthLimit {
- return nil, fmt.Errorf("cannot encode abi tuple: encode length exceeds uint16 maximum")
- }
- binary.BigEndian.PutUint16(heads[i], uint16(headValue))
- }
- // accumulate the current tailing dynamic encoding bytes length.
- tailCurrLength += len(tails[i])
- }
-
- // concat everything as the abi encoded bytes
- encoded := make([]byte, 0, headLength+tailCurrLength)
- for _, head := range heads {
- encoded = append(encoded, head...)
- }
- for _, tail := range tails {
- encoded = append(encoded, tail...)
- }
- return encoded, nil
-}
-
-// compressBools takes a slice of interface{} (which can be cast to bools) of length <= 8
-// and compresses the bool values into a uint8 integer
-func compressBools(boolSlice []interface{}) (uint8, error) {
- var res uint8 = 0
- if len(boolSlice) > 8 {
- return 0, fmt.Errorf("compressBools: cannot have slice length > 8")
- }
- for i := 0; i < len(boolSlice); i++ {
- temp, ok := boolSlice[i].(bool)
- if !ok {
- return 0, fmt.Errorf("compressBools: cannot cast slice element to bool")
- }
- if temp {
- res |= 1 << uint(7-i)
- }
- }
- return res, nil
-}
-
-// decodeUint decodes byte slice into golang int/big.Int
-func decodeUint(encoded []byte, bitSize uint16) (interface{}, error) {
- if len(encoded) != int(bitSize)/8 {
- return nil,
- fmt.Errorf("uint/ufixed decode: expected byte length %d, but got byte length %d", bitSize/8, len(encoded))
- }
- switch bitSize / 8 {
- case 1:
- return encoded[0], nil
- case 2:
- return uint16(new(big.Int).SetBytes(encoded).Uint64()), nil
- case 3, 4:
- return uint32(new(big.Int).SetBytes(encoded).Uint64()), nil
- case 5, 6, 7, 8:
- return new(big.Int).SetBytes(encoded).Uint64(), nil
- default:
- return new(big.Int).SetBytes(encoded), nil
- }
-}
-
-// Decode is an ABI type method to decode bytes to go values from ABI encoding rules
-func (t Type) Decode(encoded []byte) (interface{}, error) {
- switch t.abiTypeID {
- case Uint, Ufixed:
- return decodeUint(encoded, t.bitSize)
- case Bool:
- if len(encoded) != 1 {
- return nil, fmt.Errorf("boolean byte should be length 1 byte")
- }
- if encoded[0] == 0x00 {
- return false, nil
- } else if encoded[0] == 0x80 {
- return true, nil
- }
- return nil, fmt.Errorf("single boolean encoded byte should be of form 0x80 or 0x00")
- case Byte:
- if len(encoded) != 1 {
- return nil, fmt.Errorf("byte should be length 1")
- }
- return encoded[0], nil
- case ArrayStatic:
- castedType, err := t.typeCastToTuple()
- if err != nil {
- return nil, err
- }
- return castedType.Decode(encoded)
- case Address:
- if len(encoded) != addressByteSize {
- return nil, fmt.Errorf("address should be length 32")
- }
- return encoded, nil
- case ArrayDynamic:
- if len(encoded) < lengthEncodeByteSize {
- return nil, fmt.Errorf("dynamic array format corrupted")
- }
- dynamicLen := binary.BigEndian.Uint16(encoded[:lengthEncodeByteSize])
- castedType, err := t.typeCastToTuple(int(dynamicLen))
- if err != nil {
- return nil, err
- }
- return castedType.Decode(encoded[lengthEncodeByteSize:])
- case String:
- if len(encoded) < lengthEncodeByteSize {
- return nil, fmt.Errorf("string format corrupted")
- }
- stringLenBytes := encoded[:lengthEncodeByteSize]
- byteLen := binary.BigEndian.Uint16(stringLenBytes)
- if len(encoded[lengthEncodeByteSize:]) != int(byteLen) {
- return nil, fmt.Errorf("string representation in byte: length not matching")
- }
- return string(encoded[lengthEncodeByteSize:]), nil
- case Tuple:
- return decodeTuple(encoded, t.childTypes)
- default:
- return nil, fmt.Errorf("cannot infer type for decoding")
- }
-}
-
-// decodeTuple decodes byte slice with ABI type slice, outputting a slice of golang interface values
-// following ABI encoding rules
-func decodeTuple(encoded []byte, childT []Type) ([]interface{}, error) {
- dynamicSegments := make([]int, 0, len(childT)+1)
- valuePartition := make([][]byte, 0, len(childT))
- iterIndex := 0
-
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- if len(encoded[iterIndex:]) < lengthEncodeByteSize {
- return nil, fmt.Errorf("ill formed tuple dynamic typed value encoding")
- }
- dynamicIndex := binary.BigEndian.Uint16(encoded[iterIndex : iterIndex+lengthEncodeByteSize])
- dynamicSegments = append(dynamicSegments, int(dynamicIndex))
- valuePartition = append(valuePartition, nil)
- iterIndex += lengthEncodeByteSize
- } else if childT[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(childT, i, -1)
- // search after bool
- after := findBoolLR(childT, i, 1)
- if before%8 == 0 {
- if after > 7 {
- after = 7
- }
- // parse bool in a byte to multiple byte strings
- for boolIndex := uint(0); boolIndex <= uint(after); boolIndex++ {
- boolMask := 0x80 >> boolIndex
- if encoded[iterIndex]&byte(boolMask) > 0 {
- valuePartition = append(valuePartition, []byte{0x80})
- } else {
- valuePartition = append(valuePartition, []byte{0x00})
- }
- }
- i += after
- iterIndex++
- } else {
- return nil, fmt.Errorf("expected before bool number mod 8 == 0")
- }
- } else {
- // not bool ...
- currLen, err := childT[i].ByteLen()
- if err != nil {
- return nil, err
- }
- valuePartition = append(valuePartition, encoded[iterIndex:iterIndex+currLen])
- iterIndex += currLen
- }
- if i != len(childT)-1 && iterIndex >= len(encoded) {
- return nil, fmt.Errorf("input byte not enough to decode")
- }
- }
-
- if len(dynamicSegments) > 0 {
- dynamicSegments = append(dynamicSegments, len(encoded))
- iterIndex = len(encoded)
- }
- if iterIndex < len(encoded) {
- return nil, fmt.Errorf("input byte not fully consumed")
- }
- for i := 0; i < len(dynamicSegments)-1; i++ {
- if dynamicSegments[i] > dynamicSegments[i+1] {
- return nil, fmt.Errorf("dynamic segment should display a [l, r] space with l <= r")
- }
- }
-
- segIndex := 0
- for i := 0; i < len(childT); i++ {
- if childT[i].IsDynamic() {
- valuePartition[i] = encoded[dynamicSegments[segIndex]:dynamicSegments[segIndex+1]]
- segIndex++
- }
- }
-
- values := make([]interface{}, len(childT))
- for i := 0; i < len(childT); i++ {
- var err error
- values[i], err = childT[i].Decode(valuePartition[i])
- if err != nil {
- return nil, err
- }
- }
- return values, nil
-}
-
-// maxAppArgs is the maximum number of arguments for an application call transaction, in compliance
-// with ARC-4. Currently this is the same as the MaxAppArgs consensus parameter, but the
-// difference is that the consensus parameter is liable to change in a future consensus upgrade.
-// However, the ARC-4 ABI argument encoding **MUST** always remain the same.
-const maxAppArgs = 16
-
-// The tuple threshold is maxAppArgs, minus 1 for the method selector in the first app arg,
-// minus 1 for the final app argument becoming a tuple of the remaining method args
-const methodArgsTupleThreshold = maxAppArgs - 2
-
-// ParseArgJSONtoByteSlice converts input method arguments to ABI encoded bytes
-// it converts funcArgTypes into a tuple type and applies changes over the input argument strings (in JSON format)
-// if there are 15 or more inputs, we compact the trailing inputs into one tuple
-func ParseArgJSONtoByteSlice(argTypes []string, jsonArgs []string, applicationArgs *[][]byte) error {
- abiTypes := make([]Type, len(argTypes))
- for i, typeString := range argTypes {
- abiType, err := TypeOf(typeString)
- if err != nil {
- return err
- }
- abiTypes[i] = abiType
- }
-
- if len(abiTypes) != len(jsonArgs) {
- return fmt.Errorf("input argument number %d != method argument number %d", len(jsonArgs), len(abiTypes))
- }
-
- // Up to 16 app arguments can be passed to app call. First is reserved for method selector,
- // and the rest are for method call arguments. But if more than 15 method call arguments
- // are present, then the method arguments after the 14th are placed in a tuple in the last
- // app argument slot
- if len(abiTypes) > maxAppArgs-1 {
- typesForTuple := make([]Type, len(abiTypes)-methodArgsTupleThreshold)
- copy(typesForTuple, abiTypes[methodArgsTupleThreshold:])
-
- compactedType, err := MakeTupleType(typesForTuple)
- if err != nil {
- return err
- }
-
- abiTypes = append(abiTypes[:methodArgsTupleThreshold], compactedType)
-
- tupleValues := make([]json.RawMessage, len(jsonArgs)-methodArgsTupleThreshold)
- for i, jsonArg := range jsonArgs[methodArgsTupleThreshold:] {
- tupleValues[i] = []byte(jsonArg)
- }
-
- remainingJSON, err := json.Marshal(tupleValues)
- if err != nil {
- return err
- }
-
- jsonArgs = append(jsonArgs[:methodArgsTupleThreshold], string(remainingJSON))
- }
-
- // parse JSON value to ABI encoded bytes
- for i := 0; i < len(jsonArgs); i++ {
- interfaceVal, err := abiTypes[i].UnmarshalFromJSON([]byte(jsonArgs[i]))
- if err != nil {
- return err
- }
- abiEncoded, err := abiTypes[i].Encode(interfaceVal)
- if err != nil {
- return err
- }
- *applicationArgs = append(*applicationArgs, abiEncoded)
- }
- return nil
-}
-
-// ParseMethodSignature parses a method of format `method(argType1,argType2,...)retType`
-// into `method` {`argType1`,`argType2`,...} and `retType`
-func ParseMethodSignature(methodSig string) (name string, argTypes []string, returnType string, err error) {
- argsStart := strings.Index(methodSig, "(")
- if argsStart == -1 {
- err = fmt.Errorf(`No parenthesis in method signature: "%s"`, methodSig)
- return
- }
-
- if argsStart == 0 {
- err = fmt.Errorf(`Method signature has no name: "%s"`, methodSig)
- return
- }
-
- argsEnd := -1
- depth := 0
- for index, char := range methodSig {
- if char == '(' {
- depth++
- } else if char == ')' {
- if depth == 0 {
- err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
- return
- }
- depth--
- if depth == 0 {
- argsEnd = index
- break
- }
- }
- }
-
- if argsEnd == -1 {
- err = fmt.Errorf(`Unpaired parenthesis in method signature: "%s"`, methodSig)
- return
- }
-
- name = methodSig[:argsStart]
- argTypes, err = parseTupleContent(methodSig[argsStart+1 : argsEnd])
- returnType = methodSig[argsEnd+1:]
- return
-}
-
-// VerifyMethodSignature checks if a method signature and its referenced types can be parsed properly
-func VerifyMethodSignature(methodSig string) error {
- _, argTypes, retType, err := ParseMethodSignature(methodSig)
- if err != nil {
- return err
- }
-
- for i, argType := range argTypes {
- if IsReferenceType(argType) || IsTransactionType(argType) {
- continue
- }
-
- _, err = TypeOf(argType)
- if err != nil {
- return fmt.Errorf("Error parsing argument type at index %d: %s", i, err.Error())
- }
- }
-
- if retType != VoidReturnType {
- _, err = TypeOf(retType)
- if err != nil {
- return fmt.Errorf("Error parsing return type: %s", err.Error())
- }
- }
-
- return nil
-}
diff --git a/data/abi/abi_encode_test.go b/data/abi/abi_encode_test.go
deleted file mode 100644
index 231c1a0e0a..0000000000
--- a/data/abi/abi_encode_test.go
+++ /dev/null
@@ -1,1279 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "crypto/rand"
- "encoding/binary"
- "fmt"
- "math/big"
- "testing"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/chrismcguire/gobberish"
- "github.com/stretchr/testify/require"
-)
-
-const (
- uintStepLength = 8
- uintBegin = 8
- uintEnd = 512
- uintRandomTestPoints = 1000
- uintTestCaseCount = 200
- ufixedPrecision = 160
- ufixedRandomTestPoints = 20
- tupleMaxLength = 10
- byteTestCaseCount = 1 << 8
- boolTestCaseCount = 2
- addressTestCaseCount = 300
- stringTestCaseCount = 10
- stringTestCaseSpecLenCount = 5
- takeNum = 10
- tupleTestCaseCount = 100
-)
-
-/*
- The set of parameters ensure that the error of byte length >= 2^16 is eliminated.
-
- i. Consider uint512[] with length 10, the ABI encoding length is: 64 x 10 + 2
- (2 is introduced from dynamic array length encoding)
- The motivation here is that, forall ABI type that is non-array/non-tuple like,
- uint512 gives the longest byte length in ABI encoding
- (utf-8 string's byte length is at most 42, address byte length is at most 32)
-
- ii. Consider a tuple of length 10, with all elements uint512[] of length 10.
- The ABI encoding length is: 10 x 2 + 10 x 642 == 6440
- (2 is for tuple index to keep track of dynamic type encoding)
-
- iii. Consider a tuple of length 10, with all elements of tuples mentioned in (ii).
- The ABI encoding length is: 10 x 2 + 10 x 6440 == 64420
- This is the end of the generation of nested-tuple test case,
- no more layers of random tuples will be produced.
-
- This gives an upper bound for the produced ABI encoding byte length in this test script,
- and noticing that length 64420 mentioned in (iii) is less than 2^16 == 65536.
- Assuming that ABI implementation is correct, then the flaky test should not happen again.
-*/
-
-func TestEncodeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- // encoding test for uint type, iterating through all uint sizes
- // randomly pick 1000 valid uint values and check if encoded value match with expected
- for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
- uintType, err := makeUintType(intSize)
- require.NoError(t, err, "make uint type fail")
-
- for i := 0; i < uintRandomTestPoints; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- expected := make([]byte, intSize/8)
- randomInt.FillBytes(expected)
-
- uintEncode, err := uintType.Encode(randomInt)
- require.NoError(t, err, "encoding from uint type fail")
-
- require.Equal(t, expected, uintEncode, "encode uint not match with expected")
- }
- // 2^[bitSize] - 1 test
- // check if uint can contain max uint value (2^bitSize - 1)
- largest := new(big.Int).Add(
- upperLimit,
- new(big.Int).Neg(big.NewInt(1)),
- )
- encoded, err := uintType.Encode(largest)
- require.NoError(t, err, "largest uint encode error")
- require.Equal(t, largest.Bytes(), encoded, "encode uint largest do not match with expected")
- }
-
- // encoding test for ufixed, iterating through all the valid ufixed bitSize and precision
- // randomly generate 10 big int values for ufixed numerator and check if encoded value match with expected
- // also check if ufixed can fit max numerator (2^bitSize - 1) under specific byte bitSize
- for size := uintBegin; size <= uintEnd; size += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(size))
- largest := big.NewInt(0).Add(
- upperLimit,
- new(big.Int).Neg(big.NewInt(1)),
- )
- for precision := 1; precision <= ufixedPrecision; precision++ {
- typeUfixed, err := makeUfixedType(size, precision)
- require.NoError(t, err, "make ufixed type fail")
-
- for i := 0; i < ufixedRandomTestPoints; i++ {
- randomInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- encodedUfixed, err := typeUfixed.Encode(randomInt)
- require.NoError(t, err, "ufixed encode fail")
-
- expected := make([]byte, size/8)
- randomInt.FillBytes(expected)
- require.Equal(t, expected, encodedUfixed, "encode ufixed not match with expected")
- }
- // (2^[bitSize] - 1) / (10^[precision]) test
- ufixedLargestEncode, err := typeUfixed.Encode(largest)
- require.NoError(t, err, "largest ufixed encode error")
- require.Equal(t, largest.Bytes(), ufixedLargestEncode,
- "encode ufixed largest do not match with expected")
- }
- }
-
- // encoding test for address, since address is 32 byte, it can be considered as 256 bit uint
- // randomly generate 1000 uint256 and make address values, check if encoded value match with expected
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- for i := 0; i < uintRandomTestPoints; i++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- addrBytesExpected := make([]byte, addressByteSize)
- randomAddrInt.FillBytes(addrBytesExpected)
-
- addrBytesActual, err := addressType.Encode(addrBytesExpected)
- require.NoError(t, err, "address encode fail")
- require.Equal(t, addrBytesExpected, addrBytesActual, "encode addr not match with expected")
- }
-
- // encoding test for bool values
- for i := 0; i < boolTestCaseCount; i++ {
- boolEncode, err := boolType.Encode(i == 1)
- require.NoError(t, err, "bool encode fail")
- expected := []byte{0x00}
- if i == 1 {
- expected = []byte{0x80}
- }
- require.Equal(t, expected, boolEncode, "encode bool not match with expected")
- }
-
- // encoding test for byte values
- for i := 0; i < byteTestCaseCount; i++ {
- byteEncode, err := byteType.Encode(byte(i))
- require.NoError(t, err, "byte encode fail")
- expected := []byte{byte(i)}
- require.Equal(t, expected, byteEncode, "encode byte not match with expected")
- }
-
- // encoding test for string values, since strings in ABI contain utf-8 symbols
- // we use `gobberish` to generate random utf-8 symbols
- // randomly generate utf-8 str from length 1 to 100, each length draw 10 random strs
- // check if encoded ABI str match with expected value
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- // generate utf8 strings from `gobberish` at some length
- utf8Str := gobberish.GenerateString(length)
- // since string is just type alias of `byte[]`, we need to store number of bytes in encoding
- utf8ByteLen := len([]byte(utf8Str))
- lengthBytes := make([]byte, 2)
- binary.BigEndian.PutUint16(lengthBytes, uint16(utf8ByteLen))
- expected := append(lengthBytes, []byte(utf8Str)...)
-
- strEncode, err := stringType.Encode(utf8Str)
- require.NoError(t, err, "string encode fail")
- require.Equal(t, expected, strEncode, "encode string not match with expected")
- }
- }
-
- // encoding test for static bool array, the expected behavior of encoding is to
- // compress multiple bool into a single byte.
- // input: {T, F, F, T, T}, encode expected: {0b10011000}
- staticBoolArrType := makeStaticArrayType(boolType, 5)
- t.Run("static bool array encoding", func(t *testing.T) {
- inputBase := []bool{true, false, false, true, true}
- expected := []byte{
- 0b10011000,
- }
- boolArrEncode, err := staticBoolArrType.Encode(inputBase)
- require.NoError(t, err, "static bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
- })
-
- // encoding test for static bool array
- // input: {F, F, F, T, T, F, T, F, T, F, T}, encode expected: {0b00011010, 0b10100000}
- staticBoolArrType = makeStaticArrayType(boolType, 11)
- t.Run("static bool array encoding", func(t *testing.T) {
- inputBase := []bool{false, false, false, true, true, false, true, false, true, false, true}
- expected := []byte{
- 0b00011010, 0b10100000,
- }
- boolArrEncode, err := staticBoolArrType.Encode(inputBase)
- require.NoError(t, err, "static bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "static bool array encode not match expected")
- })
-
- // encoding test for dynamic bool array
- // input: {F, T, F, T, F, T, F, T, F, T}, encode expected: {0b01010101, 0b01000000}
- dynamicBoolArrayType := makeDynamicArrayType(boolType)
- t.Run("dynamic bool array encoding", func(t *testing.T) {
- inputBase := []bool{false, true, false, true, false, true, false, true, false, true}
- expected := []byte{
- 0x00, 0x0A, 0b01010101, 0b01000000,
- }
- boolArrEncode, err := dynamicBoolArrayType.Encode(inputBase)
- require.NoError(t, err, "dynamic bool array encoding should not return error")
- require.Equal(t, expected, boolArrEncode, "dynamic bool array encode not match expected")
- })
-
- // encoding test for dynamic tuple values
- // input type: (string, bool, bool, bool, bool, string)
- // input value: ("ABC", T, F, T, F, "DEF")
- /*
- encode expected:
- 0x00, 0x05 (first string start at 5th byte)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
- tupleType, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "type from string for dynamic tuple type should not return error")
- t.Run("dynamic tuple encoding", func(t *testing.T) {
- inputBase := []interface{}{
- "ABC", true, false, true, false, "DEF",
- }
- expected := []byte{
- 0x00, 0x05, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- stringTupleEncode, err := tupleType.Encode(inputBase)
- require.NoError(t, err, "string tuple encoding should not return error")
- require.Equal(t, expected, stringTupleEncode, "string tuple encoding not match expected")
- })
-
- // encoding test for tuples with static bool arrays
- // input type: {bool[2], bool[2]}
- // input value: ({T, T}, {T, T})
- /*
- encode expected:
- 0b11000000 (first static bool array)
- 0b11000000 (second static bool array)
- */
- tupleType, err = TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("static bool array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0b11000000,
- 0b11000000,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{true, true},
- []bool{true, true},
- })
- require.NoError(t, err, "encode tuple value should not return error")
- require.Equal(t, expected, actual, "encode static bool tuple should be equal")
- })
-
- // encoding test for tuples with static and dynamic bool arrays
- // input type: (bool[2], bool[])
- // input value: ({T, T}, {T, T})
- /*
- encode expected:
- 0b11000000 (first static bool array)
- 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
- 0b11000000 (second static bool array)
- */
- tupleType, err = TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("static/dynamic bool array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0b11000000,
- 0x00, 0x03,
- 0x00, 0x02, 0b11000000,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{true, true},
- []bool{true, true},
- })
- require.NoError(t, err, "tuple value encoding should not return error")
- require.Equal(t, expected, actual, "encode static/dynamic bool array tuple should not return error")
- })
-
- // encoding test for tuples with all dynamic bool arrays
- // input type: (bool[], bool[])
- // input values: ({}, {})
- /*
- encode expected:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
- */
- tupleType, err = TypeOf("(bool[],bool[])")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("empty dynamic array tuple encoding", func(t *testing.T) {
- expected := []byte{
- 0x00, 0x04, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00,
- }
- actual, err := tupleType.Encode([]interface{}{
- []bool{}, []bool{},
- })
- require.NoError(t, err, "encode empty dynamic array tuple should not return error")
- require.Equal(t, expected, actual, "encode empty dynamic array tuple does not match with expected")
- })
-
- // encoding test for empty tuple
- // input: (), expected encoding: ""
- tupleType, err = TypeOf("()")
- require.NoError(t, err, "type from string for tuple type should not return error")
- t.Run("empty tuple encoding", func(t *testing.T) {
- expected := make([]byte, 0)
- actual, err := tupleType.Encode([]interface{}{})
- require.NoError(t, err, "encode empty tuple should not return error")
-		require.Equal(t, expected, actual, "empty tuple encoding does not match expected")
- })
-}
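-
-// Example_encodeSketch is an editor's sketch, not part of the original file: a
-// minimal use of the exported TypeOf/Encode API exercised above. Two consecutive
-// bools in a tuple pack into a single byte, most significant bit first.
-func Example_encodeSketch() {
-	tupleT, err := TypeOf("(bool,bool)")
-	if err != nil {
-		panic(err)
-	}
-	encoded, err := tupleT.Encode([]interface{}{true, false})
-	if err != nil {
-		panic(err)
-	}
-	fmt.Printf("%08b\n", encoded[0])
-	// Output: 10000000
-}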
-
-func TestDecodeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // decoding test for uint, iterating through all valid uint bitSize
- // randomly take 1000 tests on each valid bitSize
- // generate bytes from random uint values and decode bytes with additional type information
- for intSize := uintBegin; intSize <= uintEnd; intSize += uintStepLength {
- upperLimit := new(big.Int).Lsh(big.NewInt(1), uint(intSize))
- uintType, err := makeUintType(intSize)
- require.NoError(t, err, "make uint type failure")
- for i := 0; i < uintRandomTestPoints; i++ {
- randBig, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- var expected interface{}
- if intSize <= 64 && intSize > 32 {
- expected = randBig.Uint64()
- } else if intSize <= 32 && intSize > 16 {
- expected = uint32(randBig.Uint64())
- } else if intSize == 16 {
- expected = uint16(randBig.Uint64())
- } else if intSize == 8 {
- expected = uint8(randBig.Uint64())
- } else {
- expected = randBig
- }
-
- encodedUint, err := uintType.Encode(expected)
- require.NoError(t, err, "uint encode fail")
-
- actual, err := uintType.Decode(encodedUint)
- require.NoError(t, err, "decoding uint should not return error")
- require.Equal(t, expected, actual, "decode uint fail to match expected value")
- }
- }
-
- // decoding test for ufixed, iterating through all valid ufixed bitSize and precision
- // randomly take 10 tests on each valid setting
- // generate ufixed bytes and try to decode back with additional type information
- for size := uintBegin; size <= uintEnd; size += uintStepLength {
- upperLimit := big.NewInt(0).Lsh(big.NewInt(1), uint(size))
- for precision := 1; precision <= ufixedPrecision; precision++ {
- ufixedType, err := makeUfixedType(size, precision)
- require.NoError(t, err, "make ufixed type failure")
- for i := 0; i < ufixedRandomTestPoints; i++ {
- randBig, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- var expected interface{}
- if size <= 64 && size > 32 {
- expected = randBig.Uint64()
- } else if size <= 32 && size > 16 {
- expected = uint32(randBig.Uint64())
- } else if size == 16 {
- expected = uint16(randBig.Uint64())
- } else if size == 8 {
- expected = uint8(randBig.Uint64())
- } else {
- expected = randBig
- }
-
- encodedUfixed, err := ufixedType.Encode(expected)
- require.NoError(t, err, "ufixed encode fail")
-
- actual, err := ufixedType.Decode(encodedUfixed)
- require.NoError(t, err, "decoding ufixed should not return error")
- require.Equal(t, expected, actual, "decode ufixed fail to match expected value")
- }
- }
- }
-
-	// decoding test for address, randomly take 300 tests
-	// address is a type alias of byte[32]; we generate address values from random 256-bit big ints
-	// we make the expected address value, decode its encoding, and check whether they match
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- for i := 0; i < addressTestCaseCount; i++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- expected := make([]byte, addressByteSize)
- randomAddrInt.FillBytes(expected)
-
- actual, err := addressType.Decode(expected)
- require.NoError(t, err, "decoding address should not return error")
- require.Equal(t, expected, actual, "decode addr not match with expected")
- }
-
- // bool value decoding test
- for i := 0; i < 2; i++ {
- boolEncode, err := boolType.Encode(i == 1)
- require.NoError(t, err, "bool encode fail")
- actual, err := boolType.Decode(boolEncode)
- require.NoError(t, err, "decoding bool should not return error")
- require.Equal(t, i == 1, actual, "decode bool not match with expected")
- }
-
- // byte value decoding test, iterating through 256 valid byte value
- for i := 0; i < byteTestCaseCount; i++ {
- byteEncode, err := byteType.Encode(byte(i))
- require.NoError(t, err, "byte encode fail")
- actual, err := byteType.Decode(byteEncode)
- require.NoError(t, err, "decoding byte should not return error")
- require.Equal(t, byte(i), actual, "decode byte not match with expected")
- }
-
-	// string value decoding test, testing UTF-8 strings of length 1 to 10
-	// randomly take 5 UTF-8 strings per length to make ABI string values
-	// decode the encoded expected value and check whether they match
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- expected := gobberish.GenerateString(length)
- strEncode, err := stringType.Encode(expected)
- require.NoError(t, err, "string encode fail")
- actual, err := stringType.Decode(strEncode)
- require.NoError(t, err, "decoding string should not return error")
-			require.Equal(t, expected, actual, "decoded string does not match expected")
- }
- }
-
- // decoding test for static bool array
- // expected value: bool[5]: {T, F, F, T, T}
- // input: 0b10011000
- t.Run("static bool array decode", func(t *testing.T) {
- staticBoolArrT, err := TypeOf("bool[5]")
- require.NoError(t, err, "make static bool array type failure")
- expected := []interface{}{true, false, false, true, true}
- actual, err := staticBoolArrT.Decode([]byte{0b10011000})
- require.NoError(t, err, "decoding static bool array should not return error")
- require.Equal(t, expected, actual, "static bool array decode do not match expected")
- })
-
- // decoding test for static bool array
-	// expected value: bool[11]: {F, F, F, T, T, F, T, F, T, F, T}
- // input: 0b00011010, 0b10100000
- t.Run("static bool array decode", func(t *testing.T) {
- staticBoolArrT, err := TypeOf("bool[11]")
- require.NoError(t, err, "make static bool array type failure")
- expected := []interface{}{false, false, false, true, true, false, true, false, true, false, true}
- actual, err := staticBoolArrT.Decode([]byte{0b00011010, 0b10100000})
- require.NoError(t, err, "decoding static bool array should not return error")
- require.Equal(t, expected, actual, "static bool array decode do not match expected")
- })
-
- // decoding test for static uint array
-	// expected value: uint64[8]: {1, 2, 3, 4, 5, 6, 7, 8}
- /*
- input: 0, 0, 0, 0, 0, 0, 0, 1 (encoding for uint64 1)
- 0, 0, 0, 0, 0, 0, 0, 2 (encoding for uint64 2)
- 0, 0, 0, 0, 0, 0, 0, 3 (encoding for uint64 3)
- 0, 0, 0, 0, 0, 0, 0, 4 (encoding for uint64 4)
- 0, 0, 0, 0, 0, 0, 0, 5 (encoding for uint64 5)
- 0, 0, 0, 0, 0, 0, 0, 6 (encoding for uint64 6)
- 0, 0, 0, 0, 0, 0, 0, 7 (encoding for uint64 7)
- 0, 0, 0, 0, 0, 0, 0, 8 (encoding for uint64 8)
- */
- t.Run("static uint array decode", func(t *testing.T) {
- staticUintArrT, err := TypeOf("uint64[8]")
- require.NoError(t, err, "make static uint array type failure")
- expected := []interface{}{
- uint64(1), uint64(2),
- uint64(3), uint64(4),
- uint64(5), uint64(6),
- uint64(7), uint64(8),
- }
- arrayEncoded, err := staticUintArrT.Encode(expected)
- require.NoError(t, err, "uint64 static array encode should not return error")
- actual, err := staticUintArrT.Decode(arrayEncoded)
- require.NoError(t, err, "uint64 static array decode should not return error")
- require.Equal(t, expected, actual, "uint64 static array decode do not match with expected value")
- })
-
- // decoding test for dynamic bool array
- // expected value: bool[]: {F, T, F, T, F, T, F, T, F, T}
- /*
- input bytes: 0x00, 0x0A (dynamic bool array length 10)
- 0b01010101, 0b01000000 (dynamic bool array encoding)
- */
- t.Run("dynamic bool array decode", func(t *testing.T) {
- dynamicBoolArrT, err := TypeOf("bool[]")
- require.NoError(t, err, "make dynamic bool array type failure")
- expected := []interface{}{false, true, false, true, false, true, false, true, false, true}
- inputEncoded := []byte{
- 0x00, 0x0A, 0b01010101, 0b01000000,
- }
- actual, err := dynamicBoolArrT.Decode(inputEncoded)
- require.NoError(t, err, "decode dynamic array should not return error")
- require.Equal(t, expected, actual, "decode dynamic array do not match expected")
- })
-
- // decoding test for dynamic tuple values
- // expected value type: (string, bool, bool, bool, bool, string)
- // expected value: ("ABC", T, F, T, F, "DEF")
- /*
- input bytes:
- 0x00, 0x05 (first string start at 5th byte)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
- t.Run("dynamic tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "make tuple type failure")
- inputEncode := []byte{
- 0x00, 0x05, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- expected := []interface{}{
- "ABC", true, false, true, false, "DEF",
- }
- actual, err := tupleT.Decode(inputEncode)
- require.NoError(t, err, "decoding dynamic tuple should not return error")
- require.Equal(t, expected, actual, "dynamic tuple not match with expected")
- })
-
- // decoding test for tuple with static bool array
- // expected type: (bool[2], bool[2])
- // expected value: ({T, T}, {T, T})
- /*
- input bytes:
- 0b11000000 (first static bool array)
- 0b11000000 (second static bool array)
- */
- t.Run("static bool array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{true, true},
- []interface{}{true, true},
- }
- encodedInput := []byte{
- 0b11000000,
- 0b11000000,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple value should not return error")
- require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
- })
-
- // decoding test for tuple with static and dynamic bool array
- // expected type: (bool[2], bool[])
- // expected value: ({T, T}, {T, T})
- /*
- input bytes:
- 0b11000000 (first static bool array)
- 0x00, 0x03 (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
-		0b11000000 (second dynamic bool array content)
- */
- t.Run("static/dynamic bool array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{true, true},
- []interface{}{true, true},
- }
- encodedInput := []byte{
- 0b11000000,
- 0x00, 0x03,
- 0x00, 0x02, 0b11000000,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple for static/dynamic bool array should not return error")
- require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
- })
-
- // decoding test for tuple with all dynamic bool array
-	// expected type: (bool[], bool[])
-	// expected value: ({}, {})
- /*
- input bytes:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
- */
- t.Run("empty dynamic array tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("(bool[],bool[])")
- require.NoError(t, err, "make tuple type failure")
- expected := []interface{}{
- []interface{}{}, []interface{}{},
- }
- encodedInput := []byte{
- 0x00, 0x04, 0x00, 0x06,
- 0x00, 0x00, 0x00, 0x00,
- }
- actual, err := tupleT.Decode(encodedInput)
- require.NoError(t, err, "decode tuple for empty dynamic array should not return error")
- require.Equal(t, expected, actual, "decoded tuple value do not match with expected")
- })
-
- // decoding test for empty tuple
- // expected value: ()
- // byte input: ""
- t.Run("empty tuple decoding", func(t *testing.T) {
- tupleT, err := TypeOf("()")
- require.NoError(t, err, "make empty tuple type should not return error")
- actual, err := tupleT.Decode([]byte{})
- require.NoError(t, err, "decode empty tuple should not return error")
-		require.Equal(t, []interface{}{}, actual, "decoded empty tuple does not match expected")
- })
-}
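-
-// Example_decodeSketch is an editor's sketch, not part of the original file:
-// Decode mirrors Encode and returns []interface{} for composite types, which is
-// why the tests above compare against []interface{} literals.
-func Example_decodeSketch() {
-	boolArrT, err := TypeOf("bool[3]")
-	if err != nil {
-		panic(err)
-	}
-	decoded, err := boolArrT.Decode([]byte{0b10100000})
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(decoded)
-	// Output: [true false true]
-}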
-
-func TestDecodeInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // decoding test for *corrupted* static bool array
- // expected 9 elements for static bool array
- // encoded bytes have only 8 bool values
- // should throw error
- t.Run("corrupted static bool array decode", func(t *testing.T) {
- inputBase := []byte{0b11111111}
- arrayType := makeStaticArrayType(boolType, 9)
- _, err := arrayType.Decode(inputBase)
- require.Error(t, err, "decoding corrupted static bool array should return error")
- })
-
- // decoding test for *corrupted* static bool array
- // expected 8 elements for static bool array
- // encoded bytes have 1 byte more (0b00000000)
- // should throw error
- t.Run("corrupted static bool array decode", func(t *testing.T) {
- inputBase := []byte{0b01001011, 0b00000000}
- arrayType := makeStaticArrayType(boolType, 8)
- _, err := arrayType.Decode(inputBase)
- require.Error(t, err, "decoding corrupted static bool array should return error")
- })
-
- // decoding test for *corrupted* static uint array
- // expected 8 uint elements in static uint64[8] array
- // encoded bytes provide only 7 uint64 encoding
- // should throw error
-	t.Run("corrupted static uint array decode, missing element", func(t *testing.T) {
- inputBase := []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 3,
- 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 6,
- }
- uintTArray, err := TypeOf("uint64[8]")
- require.NoError(t, err, "make uint64 static array type should not return error")
- _, err = uintTArray.Decode(inputBase)
- require.Error(t, err, "corrupted uint64 static array decode should return error")
- })
-
- // decoding test for *corrupted* static uint array
- // expected 7 uint elements in static uint64[7] array
- // encoded bytes provide 8 uint64 encoding (one more uint64: 7)
- // should throw error
-	t.Run("corrupted static uint array decode, extra element", func(t *testing.T) {
- inputBase := []byte{
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 1,
- 0, 0, 0, 0, 0, 0, 0, 2,
- 0, 0, 0, 0, 0, 0, 0, 3,
- 0, 0, 0, 0, 0, 0, 0, 4,
- 0, 0, 0, 0, 0, 0, 0, 5,
- 0, 0, 0, 0, 0, 0, 0, 6,
- 0, 0, 0, 0, 0, 0, 0, 7,
- }
- uintTArray, err := TypeOf("uint64[7]")
- require.NoError(t, err, "make uint64 static array type should not return error")
- _, err = uintTArray.Decode(inputBase)
- require.Error(t, err, "corrupted uint64 static array decode should return error")
- })
-
- // decoding test for *corrupted* dynamic bool array
- // expected 0x0A (10) bool elements in encoding head
- // encoded bytes provide only 8 bool elements
- // should throw error
- t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
- inputBase := []byte{
- 0x00, 0x0A, 0b10101010,
- }
- dynamicT := makeDynamicArrayType(boolType)
- _, err := dynamicT.Decode(inputBase)
- require.Error(t, err, "decode corrupted dynamic array should return error")
- })
-
- // decoding test for *corrupted* dynamic bool array
- // expected 0x07 (7) bool elements in encoding head
- // encoded bytes provide 1 byte more (0b00000000)
- // should throw error
- t.Run("corrupted dynamic bool array decode", func(t *testing.T) {
- inputBase := []byte{
- 0x00, 0x07, 0b10101010, 0b00000000,
- }
- dynamicT := makeDynamicArrayType(boolType)
- _, err := dynamicT.Decode(inputBase)
- require.Error(t, err, "decode corrupted dynamic array should return error")
- })
-
- // decoding test for *corrupted* dynamic tuple value
- // expected type: (string, bool, bool, bool, bool, string)
- // expected value: ("ABC", T, F, T, F, "DEF")
- /*
- corrupted bytes:
- 0x00, 0x04 (corrupted: first string start at 4th byte, should be 5th)
- 0b10100000 (4 bool tuple element compacted together)
- 0x00, 0x0A (second string start at 10th byte)
- 0x00, 0x03 (first string byte length 3)
- byte('A'), byte('B'), byte('C') (first string encoded bytes)
- 0x00, 0x03 (second string byte length 3)
- byte('D'), byte('E'), byte('F') (second string encoded bytes)
- */
-	// as a result, the first string's length would be read as 0x0A, 0x00 (i.e. 2560)
-	// that length exceeds the segment allocated to it: 0x0A, 0x00, 0x03, byte('A'), byte('B'), byte('C')
- // should throw error
- t.Run("corrupted dynamic tuple decoding", func(t *testing.T) {
- inputEncode := []byte{
- 0x00, 0x04, 0b10100000, 0x00, 0x0A,
- 0x00, 0x03, byte('A'), byte('B'), byte('C'),
- 0x00, 0x03, byte('D'), byte('E'), byte('F'),
- }
- tupleT, err := TypeOf("(string,bool,bool,bool,bool,string)")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(inputEncode)
- require.Error(t, err, "corrupted decoding dynamic tuple should return error")
- })
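-
-	// Editor's sketch, not part of the original file: with the head corrupted to
-	// 0x04, the decoder reads the first string's length from bytes 4 and 5 of the
-	// input (0x0A, 0x00), i.e. 2560, far beyond the 15-byte encoding.
-	misreadLength := 0x0A<<8 | 0x00
-	require.Equal(t, 2560, misreadLength, "sketch: corrupted offset misreads the length prefix")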
-
- // decoding test for *corrupted* tuple with static bool arrays
- // expected type: (bool[2], bool[2])
- // expected value: ({T, T}, {T, T})
- /*
- corrupted bytes test case 0:
- 0b11000000
- 0b11000000
- 0b00000000 <- corrupted byte, 1 byte more
-
-		corrupted bytes test case 1:
- 0b11000000
- <- corrupted byte, 1 byte missing
- */
- t.Run("corrupted static bool array tuple decoding", func(t *testing.T) {
- expectedType, err := TypeOf("(bool[2],bool[2])")
- require.NoError(t, err, "make tuple type failure")
- encodedInput0 := []byte{
- 0b11000000,
- 0b11000000,
- 0b00000000,
- }
- _, err = expectedType.Decode(encodedInput0)
- require.Error(t, err, "decode corrupted tuple value should return error")
-
- encodedInput1 := []byte{
- 0b11000000,
- }
- _, err = expectedType.Decode(encodedInput1)
- require.Error(t, err, "decode corrupted tuple value should return error")
- })
-
- // decoding test for *corrupted* tuple with static and dynamic bool array
- // expected type: (bool[2], bool[])
- // expected value: ({T, T}, {T, T})
- /*
- corrupted bytes:
- 0b11000000 (first static bool array)
- 0x03 <- corrupted, missing 0x00 byte (second dynamic bool array starts at 3rd byte)
- 0x00, 0x02 (dynamic bool array length 2)
-		0b11000000 (second dynamic bool array content)
- */
- t.Run("corrupted static/dynamic bool array tuple decoding", func(t *testing.T) {
- encodedInput := []byte{
- 0b11000000,
- 0x03,
- 0x00, 0x02, 0b11000000,
- }
- tupleT, err := TypeOf("(bool[2],bool[])")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted tuple for static/dynamic bool array should return error")
- })
-
- // decoding test for *corrupted* tuple with dynamic bool array
- // expected type: (bool[], bool[])
- // expected value: ({}, {})
- /*
- corrupted bytes:
- 0x00, 0x04 (first dynamic bool array starts at 4th byte)
- 0x00, 0x07 <- corrupted, should be 0x06 (second dynamic bool array starts at 6th byte)
- 0x00, 0x00 (first dynamic bool array length 0)
- 0x00, 0x00 (second dynamic bool array length 0)
-
-		the first dynamic array starts at 0x04, so its segment becomes 0x00, 0x00, 0x00 (1 byte 0x00 too many)
-		the second dynamic array starts at 0x07 and only has 1 byte (0x00) left
- */
- // should return error
- t.Run("corrupted empty dynamic array tuple decoding", func(t *testing.T) {
- encodedInput := []byte{
- 0x00, 0x04, 0x00, 0x07,
- 0x00, 0x00, 0x00, 0x00,
- }
- tupleT, err := TypeOf("(bool[],bool[])")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted tuple for empty dynamic array should return error")
- })
-
- // decoding test for *corrupted* empty tuple
- // expected value: ()
- // corrupted input: 0xFF, should be empty byte
- // should return error
- t.Run("corrupted empty tuple decoding", func(t *testing.T) {
- encodedInput := []byte{0xFF}
- tupleT, err := TypeOf("()")
- require.NoError(t, err, "make tuple type failure")
- _, err = tupleT.Decode(encodedInput)
- require.Error(t, err, "decode corrupted empty tuple should return error")
- })
-}
-
-type testUnit struct {
- serializedType string
- value interface{}
-}
-
-func categorySelfRoundTripTest(t *testing.T, category []testUnit) {
- for _, testObj := range category {
- abiType, err := TypeOf(testObj.serializedType)
- require.NoError(t, err, "failure to deserialize type: "+testObj.serializedType)
- encodedValue, err := abiType.Encode(testObj.value)
- require.NoError(t, err,
- "failure to encode value %#v over type %s", testObj.value, testObj.serializedType,
- )
- actual, err := abiType.Decode(encodedValue)
- require.NoError(t, err,
- "failure to decode value %#v for type %s", encodedValue, testObj.serializedType,
- )
- require.Equal(t, testObj.value, actual,
- "decoded value %#v not equal to expected value %#v", actual, testObj.value,
- )
- jsonEncodedValue, err := abiType.MarshalToJSON(testObj.value)
- require.NoError(t, err,
- "failure to encode value %#v to JSON type", testObj.value,
- )
- jsonActual, err := abiType.UnmarshalFromJSON(jsonEncodedValue)
- require.NoError(t, err,
- "failure to decode JSON value %s back for type %s",
- string(jsonEncodedValue), testObj.serializedType,
- )
- require.Equal(t, testObj.value, jsonActual,
- "decode JSON value %s not equal to expected %s", jsonActual, testObj.value,
- )
- }
-}
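-
-// exampleSelfRoundTrip is an editor's sketch, not part of the original file: a
-// minimal testUnit showing what categorySelfRoundTripTest exercises for one value
-// (byte encode/decode plus JSON marshal/unmarshal round trips).
-func exampleSelfRoundTrip(t *testing.T) {
-	categorySelfRoundTripTest(t, []testUnit{
-		{serializedType: "(uint64,string)", value: []interface{}{uint64(42), "hi"}},
-	})
-}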
-
-func addPrimitiveRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- (*pool)[Uint] = make([]testUnit, uintTestCaseCount*uintEnd/uintStepLength)
- (*pool)[Ufixed] = make([]testUnit, ufixedPrecision*uintEnd/uintStepLength)
-
- uintIndex := 0
- ufixedIndex := 0
-
- for bitSize := uintBegin; bitSize <= uintEnd; bitSize += uintStepLength {
- max := new(big.Int).Lsh(big.NewInt(1), uint(bitSize))
-
- uintT, err := makeUintType(bitSize)
- require.NoError(t, err, "make uint type failure")
- uintTstr := uintT.String()
-
- for j := 0; j < uintTestCaseCount; j++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate random uint, should be no error")
-
- narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
- require.NoError(t, err, "cast random uint to nearest primitive failure")
-
- (*pool)[Uint][uintIndex] = testUnit{serializedType: uintTstr, value: narrowest}
- uintIndex++
- }
-
- for precision := 1; precision <= ufixedPrecision; precision++ {
- randVal, err := rand.Int(rand.Reader, max)
- require.NoError(t, err, "generate random ufixed, should be no error")
-
- narrowest, err := castBigIntToNearestPrimitive(randVal, uint16(bitSize))
- require.NoError(t, err, "cast random uint to nearest primitive failure")
-
- ufixedT, err := makeUfixedType(bitSize, precision)
- require.NoError(t, err, "make ufixed type failure")
- ufixedTstr := ufixedT.String()
- (*pool)[Ufixed][ufixedIndex] = testUnit{serializedType: ufixedTstr, value: narrowest}
- ufixedIndex++
- }
- }
- categorySelfRoundTripTest(t, (*pool)[Uint])
- categorySelfRoundTripTest(t, (*pool)[Ufixed])
-
- (*pool)[Byte] = make([]testUnit, byteTestCaseCount)
- for i := 0; i < byteTestCaseCount; i++ {
- (*pool)[Byte][i] = testUnit{serializedType: byteType.String(), value: byte(i)}
- }
- categorySelfRoundTripTest(t, (*pool)[Byte])
-
- (*pool)[Bool] = make([]testUnit, boolTestCaseCount)
- (*pool)[Bool][0] = testUnit{serializedType: boolType.String(), value: false}
- (*pool)[Bool][1] = testUnit{serializedType: boolType.String(), value: true}
- categorySelfRoundTripTest(t, (*pool)[Bool])
-
- maxAddress := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- (*pool)[Address] = make([]testUnit, addressTestCaseCount)
- for i := 0; i < addressTestCaseCount; i++ {
- randAddrVal, err := rand.Int(rand.Reader, maxAddress)
- require.NoError(t, err, "generate random value for address, should be no error")
- addrBytes := make([]byte, addressByteSize)
- randAddrVal.FillBytes(addrBytes)
- (*pool)[Address][i] = testUnit{serializedType: addressType.String(), value: addrBytes}
- }
- categorySelfRoundTripTest(t, (*pool)[Address])
-
- (*pool)[String] = make([]testUnit, stringTestCaseCount*stringTestCaseSpecLenCount)
- stringIndex := 0
- for length := 1; length <= stringTestCaseCount; length++ {
- for i := 0; i < stringTestCaseSpecLenCount; i++ {
- (*pool)[String][stringIndex] = testUnit{
- serializedType: stringType.String(),
- value: gobberish.GenerateString(length),
- }
- stringIndex++
- }
- }
- categorySelfRoundTripTest(t, (*pool)[String])
-}
-
-func takeSomeFromCategoryAndGenerateArray(
- t *testing.T, abiT BaseType, srtIndex int, takeNum uint16, pool *map[BaseType][]testUnit) {
-
- tempArray := make([]interface{}, takeNum)
- for i := 0; i < int(takeNum); i++ {
- index := srtIndex + i
- if index >= len((*pool)[abiT]) {
- index = srtIndex
- }
- tempArray[i] = (*pool)[abiT][index].value
- }
- tempT, err := TypeOf((*pool)[abiT][srtIndex].serializedType)
-	require.NoError(t, err, "type in test unit cannot be deserialized")
- (*pool)[ArrayStatic] = append((*pool)[ArrayStatic], testUnit{
- serializedType: makeStaticArrayType(tempT, takeNum).String(),
- value: tempArray,
- })
- (*pool)[ArrayDynamic] = append((*pool)[ArrayDynamic], testUnit{
- serializedType: makeDynamicArrayType(tempT).String(),
- value: tempArray,
- })
-}
-
-func addArrayRandomValues(t *testing.T, pool *map[BaseType][]testUnit) {
- for intIndex := 0; intIndex < len((*pool)[Uint]); intIndex += uintTestCaseCount {
- takeSomeFromCategoryAndGenerateArray(t, Uint, intIndex, takeNum, pool)
- }
- takeSomeFromCategoryAndGenerateArray(t, Byte, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Address, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, String, 0, takeNum, pool)
- takeSomeFromCategoryAndGenerateArray(t, Bool, 0, takeNum, pool)
-
- categorySelfRoundTripTest(t, (*pool)[ArrayStatic])
- categorySelfRoundTripTest(t, (*pool)[ArrayDynamic])
-}
-
-func addTupleRandomValues(t *testing.T, slotRange BaseType, pool *map[BaseType][]testUnit) {
- for i := 0; i < tupleTestCaseCount; i++ {
- tupleLenBig, err := rand.Int(rand.Reader, big.NewInt(tupleMaxLength))
- require.NoError(t, err, "generate random tuple length should not return error")
- tupleLen := tupleLenBig.Int64() + 1
- testUnits := make([]testUnit, tupleLen)
- for index := 0; index < int(tupleLen); index++ {
- tupleTypeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(slotRange)+1))
- require.NoError(t, err, "generate random tuple element type index should not return error")
- tupleTypeIndex := BaseType(tupleTypeIndexBig.Int64())
- tupleElemChoiceRange := len((*pool)[tupleTypeIndex])
-
- tupleElemRangeIndexBig, err := rand.Int(rand.Reader, big.NewInt(int64(tupleElemChoiceRange)))
- require.NoError(t, err, "generate random tuple element index in test pool should not return error")
- tupleElemRangeIndex := tupleElemRangeIndexBig.Int64()
- tupleElem := (*pool)[tupleTypeIndex][tupleElemRangeIndex]
- testUnits[index] = tupleElem
- }
- elemValues := make([]interface{}, tupleLen)
- elemTypes := make([]Type, tupleLen)
- for index := 0; index < int(tupleLen); index++ {
- elemValues[index] = testUnits[index].value
- abiT, err := TypeOf(testUnits[index].serializedType)
- require.NoError(t, err, "deserialize type failure for tuple elements")
- elemTypes[index] = abiT
- }
- tupleT, err := MakeTupleType(elemTypes)
- require.NoError(t, err, "make tuple type failure")
- (*pool)[Tuple] = append((*pool)[Tuple], testUnit{
- serializedType: tupleT.String(),
- value: elemValues,
- })
- }
-}
-
-func TestRandomABIEncodeDecodeRoundTrip(t *testing.T) {
- partitiontest.PartitionTest(t)
- testValuePool := make(map[BaseType][]testUnit)
- addPrimitiveRandomValues(t, &testValuePool)
- addArrayRandomValues(t, &testValuePool)
- addTupleRandomValues(t, String, &testValuePool)
- addTupleRandomValues(t, Tuple, &testValuePool)
- categorySelfRoundTripTest(t, testValuePool[Tuple])
-}
-
-func TestParseArgJSONtoByteSlice(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- makeRepeatSlice := func(size int, value string) []string {
- slice := make([]string, size)
- for i := range slice {
- slice[i] = value
- }
- return slice
- }
-
- tests := []struct {
- argTypes []string
- jsonArgs []string
- expectedAppArgs [][]byte
- }{
- {
- argTypes: []string{},
- jsonArgs: []string{},
- expectedAppArgs: [][]byte{},
- },
- {
- argTypes: []string{"uint8"},
- jsonArgs: []string{"100"},
- expectedAppArgs: [][]byte{{100}},
- },
- {
- argTypes: []string{"uint8", "uint16"},
- jsonArgs: []string{"100", "65535"},
- expectedAppArgs: [][]byte{{100}, {255, 255}},
- },
- {
- argTypes: makeRepeatSlice(15, "string"),
- jsonArgs: []string{
- `"a"`,
- `"b"`,
- `"c"`,
- `"d"`,
- `"e"`,
- `"f"`,
- `"g"`,
- `"h"`,
- `"i"`,
- `"j"`,
- `"k"`,
- `"l"`,
- `"m"`,
- `"n"`,
- `"o"`,
- },
- expectedAppArgs: [][]byte{
- {00, 01, 97},
- {00, 01, 98},
- {00, 01, 99},
- {00, 01, 100},
- {00, 01, 101},
- {00, 01, 102},
- {00, 01, 103},
- {00, 01, 104},
- {00, 01, 105},
- {00, 01, 106},
- {00, 01, 107},
- {00, 01, 108},
- {00, 01, 109},
- {00, 01, 110},
- {00, 01, 111},
- },
- },
- {
- argTypes: makeRepeatSlice(16, "string"),
- jsonArgs: []string{
- `"a"`,
- `"b"`,
- `"c"`,
- `"d"`,
- `"e"`,
- `"f"`,
- `"g"`,
- `"h"`,
- `"i"`,
- `"j"`,
- `"k"`,
- `"l"`,
- `"m"`,
- `"n"`,
- `"o"`,
- `"p"`,
- },
- expectedAppArgs: [][]byte{
- {00, 01, 97},
- {00, 01, 98},
- {00, 01, 99},
- {00, 01, 100},
- {00, 01, 101},
- {00, 01, 102},
- {00, 01, 103},
- {00, 01, 104},
- {00, 01, 105},
- {00, 01, 106},
- {00, 01, 107},
- {00, 01, 108},
- {00, 01, 109},
- {00, 01, 110},
- {00, 04, 00, 07, 00, 01, 111, 00, 01, 112},
- },
- },
- }
-
- for i, test := range tests {
- t.Run(fmt.Sprintf("index=%d", i), func(t *testing.T) {
- applicationArgs := [][]byte{}
- err := ParseArgJSONtoByteSlice(test.argTypes, test.jsonArgs, &applicationArgs)
- require.NoError(t, err)
- require.Equal(t, test.expectedAppArgs, applicationArgs)
- })
- }
-}
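-
-// Example_packedFinalArgSketch is an editor's sketch, not part of the original
-// file: in the 16-string case above, every argument from the 15th onward is
-// packed into one tuple occupying the final app arg, so that arg decodes back
-// into the last two strings.
-func Example_packedFinalArgSketch() {
-	tupleT, err := TypeOf("(string,string)")
-	if err != nil {
-		panic(err)
-	}
-	decoded, err := tupleT.Decode([]byte{0, 4, 0, 7, 0, 1, 'o', 0, 1, 'p'})
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(decoded)
-	// Output: [o p]
-}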
-
-func TestParseMethodSignature(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- tests := []struct {
- signature string
- name string
- argTypes []string
- returnType string
- }{
- {
- signature: "add(uint8,uint16,pay,account,txn)uint32",
- name: "add",
- argTypes: []string{"uint8", "uint16", "pay", "account", "txn"},
- returnType: "uint32",
- },
- {
- signature: "nothing()void",
- name: "nothing",
- argTypes: []string{},
- returnType: "void",
- },
- {
- signature: "tupleArgs((uint8,uint128),account,(string,(bool,bool)))bool",
- name: "tupleArgs",
- argTypes: []string{"(uint8,uint128)", "account", "(string,(bool,bool))"},
- returnType: "bool",
- },
- {
- signature: "tupleReturn(uint64)(bool,bool,bool)",
- name: "tupleReturn",
- argTypes: []string{"uint64"},
- returnType: "(bool,bool,bool)",
- },
- {
- signature: "tupleArgsAndReturn((uint8,uint128),account,(string,(bool,bool)))(bool,bool,bool)",
- name: "tupleArgsAndReturn",
- argTypes: []string{"(uint8,uint128)", "account", "(string,(bool,bool))"},
- returnType: "(bool,bool,bool)",
- },
- }
-
- for _, test := range tests {
- t.Run(test.signature, func(t *testing.T) {
- name, argTypes, returnType, err := ParseMethodSignature(test.signature)
- require.NoError(t, err)
- require.Equal(t, test.name, name)
- require.Equal(t, test.argTypes, argTypes)
- require.Equal(t, test.returnType, returnType)
- })
- }
-}
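-
-// Editor's sketch, not part of the original file: ParseMethodSignature splits a
-// full method signature into its name, argument type strings, and return type.
-func ExampleParseMethodSignature() {
-	name, argTypes, returnType, err := ParseMethodSignature("add(uint64,uint64)uint128")
-	if err != nil {
-		panic(err)
-	}
-	fmt.Println(name, argTypes, returnType)
-	// Output: add [uint64 uint64] uint128
-}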
-
-func TestInferToSlice(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- var emptySlice []int
- tests := []struct {
- toBeInferred interface{}
- length int
- }{
- {
- toBeInferred: []int{},
- length: 0,
- },
- {
- toBeInferred: make([]int, 0),
- length: 0,
- },
- {
- toBeInferred: emptySlice,
- length: 0,
- },
- {
- toBeInferred: [0]int{},
- length: 0,
- },
- {
- toBeInferred: [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
- length: 32,
- },
- {
- toBeInferred: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32},
- length: 32,
- },
- }
-
- for i, test := range tests {
- inferredSlice, err := inferToSlice(test.toBeInferred)
- require.NoError(t, err, "inferToSlice on testcase %d failed to successfully infer %v", i, test.toBeInferred)
- require.Equal(t, test.length, len(inferredSlice), "inferToSlice on testcase %d inferred different length, expected %d", i, test.length)
- }
-
-	// one more testcase: a totally nil value (with no type information) is bad, should not pass the test
- _, err := inferToSlice(nil)
- require.EqualError(
- t, err,
- "cannot infer an interface value as a slice of interface element",
- "inferToSlice should return type inference error when passed in nil with unexpected Kind")
-
-	// one more testcase: a typed nil pointer is also bad, should not pass the test
- var nilPt *uint64 = nil
- _, err = inferToSlice(nilPt)
- require.EqualError(
- t, err,
- "cannot infer an interface value as a slice of interface element",
- "inferToSlice should return type inference error when passing argument type other than slice or array")
-}
diff --git a/data/abi/abi_json.go b/data/abi/abi_json.go
deleted file mode 100644
index a71823f0ce..0000000000
--- a/data/abi/abi_json.go
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "bytes"
- "crypto/sha512"
- "encoding/base32"
- "encoding/json"
- "fmt"
- "math/big"
-)
-
-// NOTE: discussion about go-algorand-sdk
-// https://github.com/algorand/go-algorand/pull/3375#issuecomment-1007536841
-
-var base32Encoder = base32.StdEncoding.WithPadding(base32.NoPadding)
-
-func addressCheckSum(addressBytes []byte) ([]byte, error) {
- if len(addressBytes) != addressByteSize {
- return nil, fmt.Errorf("address bytes should be of length 32")
- }
- hashed := sha512.Sum512_256(addressBytes[:])
- return hashed[addressByteSize-checksumByteSize:], nil
-}
-
-func castBigIntToNearestPrimitive(num *big.Int, bitSize uint16) (interface{}, error) {
- if num.BitLen() > int(bitSize) {
- return nil, fmt.Errorf("cast big int to nearest primitive failure: %v >= 2^%d", num, bitSize)
- } else if num.Sign() < 0 {
-		return nil, fmt.Errorf("cannot cast big int to nearest primitive: %v < 0", num)
- }
-
- switch bitSize / 8 {
- case 1:
- return uint8(num.Uint64()), nil
- case 2:
- return uint16(num.Uint64()), nil
- case 3, 4:
- return uint32(num.Uint64()), nil
- case 5, 6, 7, 8:
- return num.Uint64(), nil
- default:
- return num, nil
- }
-}
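-
-// exampleNearestPrimitive is an editor's sketch, not part of the original file:
-// the narrowing rule above maps a bit size to the smallest native unsigned type
-// that can hold it, falling back to *big.Int above 64 bits.
-func exampleNearestPrimitive() {
-	v8, _ := castBigIntToNearestPrimitive(big.NewInt(7), 8)     // uint8(7)
-	v24, _ := castBigIntToNearestPrimitive(big.NewInt(7), 24)   // uint32(7)
-	v64, _ := castBigIntToNearestPrimitive(big.NewInt(7), 64)   // uint64(7)
-	v256, _ := castBigIntToNearestPrimitive(big.NewInt(7), 256) // *big.Int
-	fmt.Println(v8, v24, v64, v256)
-}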
-
-// MarshalToJSON converts a golang value to JSON format following the ABI type.
-func (t Type) MarshalToJSON(value interface{}) ([]byte, error) {
- switch t.abiTypeID {
- case Uint:
- bytesUint, err := encodeInt(value, t.bitSize)
- if err != nil {
- return nil, err
- }
- return new(big.Int).SetBytes(bytesUint).MarshalJSON()
- case Ufixed:
- denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
- encodedUint, err := encodeInt(value, t.bitSize)
- if err != nil {
- return nil, err
- }
- return []byte(new(big.Rat).SetFrac(new(big.Int).SetBytes(encodedUint), denom).FloatString(int(t.precision))), nil
- case Bool:
- boolValue, ok := value.(bool)
- if !ok {
- return nil, fmt.Errorf("cannot infer to bool for marshal to JSON")
- }
- return json.Marshal(boolValue)
- case Byte:
- byteValue, ok := value.(byte)
- if !ok {
- return nil, fmt.Errorf("cannot infer to byte for marshal to JSON")
- }
- return json.Marshal(byteValue)
- case Address:
- var addressValueInternal []byte
- switch valueCasted := value.(type) {
- case []byte:
- if len(valueCasted) != addressByteSize {
- return nil, fmt.Errorf("address byte slice length not equal to 32 byte")
- }
- addressValueInternal = valueCasted
-		case [addressByteSize]byte:
-			// slice the array directly; copying into a nil slice would copy nothing
-			addressValueInternal = valueCasted[:]
- default:
- return nil, fmt.Errorf("cannot infer to byte slice/array for marshal to JSON")
- }
- checksum, err := addressCheckSum(addressValueInternal)
- if err != nil {
- return nil, err
- }
- addressValueInternal = append(addressValueInternal, checksum...)
- return json.Marshal(base32Encoder.EncodeToString(addressValueInternal))
- case ArrayStatic, ArrayDynamic:
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if t.abiTypeID == ArrayStatic && int(t.staticLength) != len(values) {
- return nil, fmt.Errorf("length of slice %d != type specific length %d", len(values), t.staticLength)
- }
- if t.childTypes[0].abiTypeID == Byte {
- byteArr := make([]byte, len(values))
- for i := 0; i < len(values); i++ {
- tempByte, ok := values[i].(byte)
- if !ok {
- return nil, fmt.Errorf("cannot infer byte element from slice")
- }
- byteArr[i] = tempByte
- }
- return json.Marshal(byteArr)
- }
- rawMsgSlice := make([]json.RawMessage, len(values))
- for i := 0; i < len(values); i++ {
- rawMsgSlice[i], err = t.childTypes[0].MarshalToJSON(values[i])
- if err != nil {
- return nil, err
- }
- }
- return json.Marshal(rawMsgSlice)
- case String:
- stringVal, ok := value.(string)
- if !ok {
- return nil, fmt.Errorf("cannot infer to string for marshal to JSON")
- }
- return json.Marshal(stringVal)
- case Tuple:
- values, err := inferToSlice(value)
- if err != nil {
- return nil, err
- }
- if len(values) != int(t.staticLength) {
- return nil, fmt.Errorf("tuple element number != value slice length")
- }
- rawMsgSlice := make([]json.RawMessage, len(values))
- for i := 0; i < len(values); i++ {
- rawMsgSlice[i], err = t.childTypes[i].MarshalToJSON(values[i])
- if err != nil {
- return nil, err
- }
- }
- return json.Marshal(rawMsgSlice)
- default:
- return nil, fmt.Errorf("cannot infer ABI type for marshalling value to JSON")
- }
-}
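-
-// exampleMarshalToJSON is an editor's sketch, not part of the original file:
-// composite ABI values render as JSON arrays, uints as JSON numbers, and strings
-// as JSON strings, so a (uint64,string) tuple yields []byte(`[42,"hi"]`).
-func exampleMarshalToJSON() ([]byte, error) {
-	tupleT, err := TypeOf("(uint64,string)")
-	if err != nil {
-		return nil, err
-	}
-	return tupleT.MarshalToJSON([]interface{}{uint64(42), "hi"})
-}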
-
-// UnmarshalFromJSON converts JSON-encoded bytes to a golang value following ABI type and encoding rules.
-func (t Type) UnmarshalFromJSON(jsonEncoded []byte) (interface{}, error) {
- switch t.abiTypeID {
- case Uint:
- num := new(big.Int)
- if err := num.UnmarshalJSON(jsonEncoded); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to uint: %v", string(jsonEncoded), err)
- }
- return castBigIntToNearestPrimitive(num, t.bitSize)
- case Ufixed:
- floatTemp := new(big.Rat)
- if err := floatTemp.UnmarshalText(jsonEncoded); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: %v", string(jsonEncoded), err)
- }
- denom := new(big.Int).Exp(big.NewInt(10), big.NewInt(int64(t.precision)), nil)
- denomRat := new(big.Rat).SetInt(denom)
- numeratorRat := new(big.Rat).Mul(denomRat, floatTemp)
- if !numeratorRat.IsInt() {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to ufixed: precision out of range", string(jsonEncoded))
- }
- return castBigIntToNearestPrimitive(numeratorRat.Num(), t.bitSize)
- case Bool:
- var elem bool
- if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bool: %v", string(jsonEncoded), err)
- }
- return elem, nil
- case Byte:
- var elem byte
- if err := json.Unmarshal(jsonEncoded, &elem); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded to byte: %v", err)
- }
- return elem, nil
- case Address:
- var addrStr string
- if err := json.Unmarshal(jsonEncoded, &addrStr); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded to address string: %v", err)
- }
- decoded, err := base32Encoder.DecodeString(addrStr)
- if err != nil {
- return nil,
- fmt.Errorf("cannot cast JSON encoded address string (%s) to address: %v", addrStr, err)
- }
- if len(decoded) != addressByteSize+checksumByteSize {
- return nil,
- fmt.Errorf(
- "cannot cast JSON encoded address string (%s) to address: "+
-					"decoded byte length should equal 36 (address + checksum)",
- string(jsonEncoded),
- )
- }
- checksum, err := addressCheckSum(decoded[:addressByteSize])
- if err != nil {
- return nil, err
- }
- if !bytes.Equal(checksum, decoded[addressByteSize:]) {
-			return nil, fmt.Errorf("cannot cast JSON encoded address string (%s) to address: decoded checksum does not match", addrStr)
- }
- return decoded[:addressByteSize], nil
- case ArrayStatic, ArrayDynamic:
- if t.childTypes[0].abiTypeID == Byte && bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
- var byteArr []byte
- err := json.Unmarshal(jsonEncoded, &byteArr)
- if err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to bytes: %v", string(jsonEncoded), err)
- }
- if t.abiTypeID == ArrayStatic && len(byteArr) != int(t.staticLength) {
- return nil, fmt.Errorf("length of slice %d != type specific length %d", len(byteArr), t.staticLength)
- }
- outInterface := make([]interface{}, len(byteArr))
- for i := 0; i < len(byteArr); i++ {
- outInterface[i] = byteArr[i]
- }
- return outInterface, nil
- }
- var elems []json.RawMessage
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array: %v", string(jsonEncoded), err)
- }
- if t.abiTypeID == ArrayStatic && len(elems) != int(t.staticLength) {
- return nil, fmt.Errorf("JSON array element number != ABI array elem number")
- }
- values := make([]interface{}, len(elems))
- for i := 0; i < len(elems); i++ {
- tempValue, err := t.childTypes[0].UnmarshalFromJSON(elems[i])
- if err != nil {
- return nil, err
- }
- values[i] = tempValue
- }
- return values, nil
- case String:
- stringEncoded := string(jsonEncoded)
- if bytes.HasPrefix(jsonEncoded, []byte{'"'}) {
- var stringVar string
- if err := json.Unmarshal(jsonEncoded, &stringVar); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
- }
- return stringVar, nil
- } else if bytes.HasPrefix(jsonEncoded, []byte{'['}) {
- var elems []byte
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string: %v", stringEncoded, err)
- }
- return string(elems), nil
- } else {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to string", stringEncoded)
- }
- case Tuple:
- var elems []json.RawMessage
- if err := json.Unmarshal(jsonEncoded, &elems); err != nil {
- return nil, fmt.Errorf("cannot cast JSON encoded (%s) to array for tuple: %v", string(jsonEncoded), err)
- }
- if len(elems) != int(t.staticLength) {
- return nil, fmt.Errorf("JSON array element number != ABI tuple elem number")
- }
- values := make([]interface{}, len(elems))
- for i := 0; i < len(elems); i++ {
- tempValue, err := t.childTypes[i].UnmarshalFromJSON(elems[i])
- if err != nil {
- return nil, err
- }
- values[i] = tempValue
- }
- return values, nil
- default:
- return nil, fmt.Errorf("cannot cast JSON encoded %s to ABI encoding stuff", string(jsonEncoded))
-		return nil, fmt.Errorf("cannot cast JSON encoded %s to an ABI value", string(jsonEncoded))
-}
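-
-// exampleUnmarshalFromJSON is an editor's sketch, not part of the original file:
-// byte arrays can be given either as a JSON array of numbers or, as here, as a
-// base64 JSON string ("AAEC" decodes to 0x00 0x01 0x02).
-func exampleUnmarshalFromJSON() (interface{}, error) {
-	byteArrT, err := TypeOf("byte[3]")
-	if err != nil {
-		return nil, err
-	}
-	return byteArrT.UnmarshalFromJSON([]byte(`"AAEC"`))
-}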
diff --git a/data/abi/abi_json_test.go b/data/abi/abi_json_test.go
deleted file mode 100644
index 49083fdeaa..0000000000
--- a/data/abi/abi_json_test.go
+++ /dev/null
@@ -1,150 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "crypto/rand"
- "math/big"
- "testing"
-
- "github.com/algorand/go-algorand/data/basics"
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func TestRandomAddressEquality(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- upperLimit := new(big.Int).Lsh(big.NewInt(1), addressByteSize<<3)
- var addrBasics basics.Address
- var addrABI []byte = make([]byte, addressByteSize)
-
- for testCaseIndex := 0; testCaseIndex < addressTestCaseCount; testCaseIndex++ {
- randomAddrInt, err := rand.Int(rand.Reader, upperLimit)
- require.NoError(t, err, "cryptographic random int init fail")
-
- randomAddrInt.FillBytes(addrBasics[:])
- randomAddrInt.FillBytes(addrABI)
-
- checkSumBasics := addrBasics.GetChecksum()
- checkSumABI, err := addressCheckSum(addrABI)
- require.NoError(t, err, "ABI compute checksum for address slice failed")
-
-		require.Equal(t, checkSumBasics, checkSumABI,
-			"basics.Address computed checksum %v not equal to data.abi computed checksum %v",
-			checkSumBasics, checkSumABI,
-		)
- }
-}
-
-func TestJSONtoInterfaceValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- var testCases = []struct {
- input string
- typeStr string
- expected interface{}
- }{
- {
- input: `[true, [0, 1, 2], 17]`,
- typeStr: `(bool,byte[],uint64)`,
- expected: []interface{}{
- true,
- []interface{}{byte(0), byte(1), byte(2)},
- uint64(17),
- },
- },
- {
- input: `[true, "AAEC", 17]`,
- typeStr: `(bool,byte[],uint64)`,
- expected: []interface{}{
- true,
- []interface{}{byte(0), byte(1), byte(2)},
- uint64(17),
- },
- },
- {
- input: `"AQEEBQEE"`,
- typeStr: `byte[6]`,
- expected: []interface{}{byte(1), byte(1), byte(4), byte(5), byte(1), byte(4)},
- },
- {
- input: `[[0, [true, false], "utf-8"], [18446744073709551615, [false, true], "pistachio"]]`,
- typeStr: `(uint64,bool[2],string)[]`,
- expected: []interface{}{
- []interface{}{uint64(0), []interface{}{true, false}, "utf-8"},
- []interface{}{^uint64(0), []interface{}{false, true}, "pistachio"},
- },
- },
- {
- input: `[]`,
- typeStr: `(uint64,bool[2],string)[]`,
- expected: []interface{}{},
- },
- {
- input: "[]",
- typeStr: "()",
- expected: []interface{}{},
- },
- {
- input: "[65, 66, 67]",
- typeStr: "string",
- expected: "ABC",
- },
- {
- input: "[]",
- typeStr: "string",
- expected: "",
- },
- {
- input: "123.456",
- typeStr: "ufixed64x3",
- expected: uint64(123456),
- },
- {
- input: `"optin"`,
- typeStr: "string",
- expected: "optin",
- },
- {
- input: `"AAEC"`,
- typeStr: "byte[3]",
- expected: []interface{}{byte(0), byte(1), byte(2)},
- },
- {
- input: `["uwu",["AAEC",12.34]]`,
- typeStr: "(string,(byte[3],ufixed64x3))",
- expected: []interface{}{"uwu", []interface{}{[]interface{}{byte(0), byte(1), byte(2)}, uint64(12340)}},
- },
- {
- input: `[399,"should pass",[true,false,false,true]]`,
- typeStr: "(uint64,string,bool[])",
- expected: []interface{}{uint64(399), "should pass", []interface{}{true, false, false, true}},
- },
- }
-
- for _, testCase := range testCases {
- abiT, err := TypeOf(testCase.typeStr)
- require.NoError(t, err, "fail to construct ABI type (%s): %v", testCase.typeStr, err)
- res, err := abiT.UnmarshalFromJSON([]byte(testCase.input))
- require.NoError(t, err, "fail to unmarshal JSON to interface: (%s): %v", testCase.input, err)
- require.Equal(t, testCase.expected, res, "%v not matching with expected value %v", res, testCase.expected)
- resEncoded, err := abiT.Encode(res)
- require.NoError(t, err, "fail to encode %v to ABI bytes: %v", res, err)
- resDecoded, err := abiT.Decode(resEncoded)
- require.NoError(t, err, "fail to decode ABI bytes of %v: %v", res, err)
- require.Equal(t, res, resDecoded, "ABI encode-decode round trip: %v not match with expected %v", resDecoded, res)
- }
-}
diff --git a/data/abi/abi_type.go b/data/abi/abi_type.go
deleted file mode 100644
index aa4e0b75af..0000000000
--- a/data/abi/abi_type.go
+++ /dev/null
@@ -1,498 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "fmt"
- "math"
- "regexp"
- "strconv"
- "strings"
-)
-
-/*
-	ABI-Types: uint<N>: An N-bit unsigned integer (8 <= N <= 512 and N % 8 = 0).
-	| byte (alias for uint8)
-	| ufixed<N>x<M> (8 <= N <= 512, N % 8 = 0, and 0 < M <= 160)
-	| bool
-	| address (alias for byte[32])
-	| <type>[<N>]
-	| <type>[]
- | string
- | (T1, ..., Tn)
-*/
-
-// BaseType is a type alias for uint32. A BaseType value indicates the type of an ABI value.
-type BaseType uint32
-
-const (
- // Uint is the index (0) for `Uint` type in ABI encoding.
- Uint BaseType = iota
- // Byte is the index (1) for `Byte` type in ABI encoding.
- Byte
- // Ufixed is the index (2) for `UFixed` type in ABI encoding.
- Ufixed
- // Bool is the index (3) for `Bool` type in ABI encoding.
- Bool
-	// ArrayStatic is the index (4) for static length array (<type>[<length>]) type in ABI encoding.
-	ArrayStatic
-	// Address is the index (5) for `Address` type in ABI encoding (a type alias of byte[32]).
-	Address
-	// ArrayDynamic is the index (6) for dynamic length array (<type>[]) type in ABI encoding.
-	ArrayDynamic
-	// String is the index (7) for `String` type in ABI encoding (a type alias of byte[]).
-	String
-	// Tuple is the index (8) for tuple `(<type 0>, ..., <type k>)` in ABI encoding.
- Tuple
-)
-
-const (
- addressByteSize = 32
- checksumByteSize = 4
- singleByteSize = 1
- singleBoolSize = 1
- lengthEncodeByteSize = 2
- abiEncodingLengthLimit = 1 << 16
-)
-
-// Type is the struct that stores information about an ABI value's type.
-type Type struct {
- abiTypeID BaseType
- childTypes []Type
-
-	// can only be applied to `uint` bitSize or `ufixed` bitSize
-	bitSize uint16
-	// can only be applied to `ufixed` precision
- precision uint16
-
- // length for static array / tuple
- /*
-		by ABI spec, len over binary array returns the number of bytes
-		the type is uint16, which allows only lengths in [0, 2^16 - 1]
-		so the representation of static length is constrained to uint16
- */
- // NOTE may want to change back to uint32/uint64
- staticLength uint16
-}
-
-// String serializes an ABI Type to a string in ABI encoding.
-func (t Type) String() string {
- switch t.abiTypeID {
- case Uint:
- return fmt.Sprintf("uint%d", t.bitSize)
- case Byte:
- return "byte"
- case Ufixed:
- return fmt.Sprintf("ufixed%dx%d", t.bitSize, t.precision)
- case Bool:
- return "bool"
- case ArrayStatic:
- return fmt.Sprintf("%s[%d]", t.childTypes[0].String(), t.staticLength)
- case Address:
- return "address"
- case ArrayDynamic:
- return t.childTypes[0].String() + "[]"
- case String:
- return "string"
- case Tuple:
- typeStrings := make([]string, len(t.childTypes))
- for i := 0; i < len(t.childTypes); i++ {
- typeStrings[i] = t.childTypes[i].String()
- }
- return "(" + strings.Join(typeStrings, ",") + ")"
- default:
- panic("Type Serialization Error, fail to infer from abiTypeID (bruh you shouldn't be here)")
- }
-}
-
-var staticArrayRegexp = regexp.MustCompile(`^([a-z\d\[\](),]+)\[([1-9][\d]*)]$`)
-var ufixedRegexp = regexp.MustCompile(`^ufixed([1-9][\d]*)x([1-9][\d]*)$`)
-
-// TypeOf parses an ABI type string.
-// For example: `TypeOf("(uint64,byte[])")`
-func TypeOf(str string) (Type, error) {
- switch {
- case strings.HasSuffix(str, "[]"):
- arrayArgType, err := TypeOf(str[:len(str)-2])
- if err != nil {
- return Type{}, err
- }
- return makeDynamicArrayType(arrayArgType), nil
- case strings.HasSuffix(str, "]"):
- stringMatches := staticArrayRegexp.FindStringSubmatch(str)
- // match the string itself, array element type, then array length
- if len(stringMatches) != 3 {
-			return Type{}, fmt.Errorf(`static array ill formed: "%s"`, str)
-		}
-		// guaranteed that the array length is present
-		arrayLengthStr := stringMatches[2]
-		// allow only decimal static array lengths, limited to 2^16 - 1
- arrayLength, err := strconv.ParseUint(arrayLengthStr, 10, 16)
- if err != nil {
- return Type{}, err
- }
- // parse the array element type
- arrayType, err := TypeOf(stringMatches[1])
- if err != nil {
- return Type{}, err
- }
- return makeStaticArrayType(arrayType, uint16(arrayLength)), nil
- case strings.HasPrefix(str, "uint"):
- typeSize, err := strconv.ParseUint(str[4:], 10, 16)
- if err != nil {
- return Type{}, fmt.Errorf(`ill formed uint type: "%s"`, str)
- }
- return makeUintType(int(typeSize))
- case str == "byte":
- return byteType, nil
- case strings.HasPrefix(str, "ufixed"):
- stringMatches := ufixedRegexp.FindStringSubmatch(str)
- // match string itself, then type-bitSize, and type-precision
- if len(stringMatches) != 3 {
- return Type{}, fmt.Errorf(`ill formed ufixed type: "%s"`, str)
- }
- // guaranteed that there are 2 uint strings in ufixed string
- ufixedSize, err := strconv.ParseUint(stringMatches[1], 10, 16)
- if err != nil {
- return Type{}, err
- }
- ufixedPrecision, err := strconv.ParseUint(stringMatches[2], 10, 16)
- if err != nil {
- return Type{}, err
- }
- return makeUfixedType(int(ufixedSize), int(ufixedPrecision))
- case str == "bool":
- return boolType, nil
- case str == "address":
- return addressType, nil
- case str == "string":
- return stringType, nil
- case len(str) >= 2 && str[0] == '(' && str[len(str)-1] == ')':
- tupleContent, err := parseTupleContent(str[1 : len(str)-1])
- if err != nil {
- return Type{}, err
- }
- tupleTypes := make([]Type, len(tupleContent))
- for i := 0; i < len(tupleContent); i++ {
- ti, err := TypeOf(tupleContent[i])
- if err != nil {
- return Type{}, err
- }
- tupleTypes[i] = ti
- }
- return MakeTupleType(tupleTypes)
- default:
- return Type{}, fmt.Errorf(`cannot convert the string "%s" to an ABI type`, str)
- }
-}
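-
-// exampleTypeOfRoundTrip is an editor's sketch, not part of the original file:
-// TypeOf and String round-trip, so a parsed type serializes back to the same
-// canonical ABI string.
-func exampleTypeOfRoundTrip() (string, error) {
-	parsed, err := TypeOf("(uint64,byte[4],bool[])")
-	if err != nil {
-		return "", err
-	}
-	return parsed.String(), nil // "(uint64,byte[4],bool[])"
-}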
-
-// segment keeps track of the start and end of a segment in a string.
-type segment struct{ left, right int }
-
-// parseTupleContent splits an ABI encoded string for tuple type into multiple sub-strings.
-// Each sub-string represents a content type of the tuple type.
-// The argument str is the content between parentheses of tuple, i.e.
-// (...... str ......)
-// ^ ^
-func parseTupleContent(str string) ([]string, error) {
- // if the tuple type content is empty (which is also allowed)
- // just return the empty string list
- if len(str) == 0 {
- return []string{}, nil
- }
-
-	// the following 2 checks make sure the input string can be separated by commas
-	// with form: "...substr_0,...substr_1,...,...substr_k"
-
-	// str should not have a leading/trailing comma
-	if strings.HasSuffix(str, ",") || strings.HasPrefix(str, ",") {
-		return []string{}, fmt.Errorf("parsing error: tuple content should not start or end with comma")
-	}
-
-	// str should not contain consecutive commas
-	if strings.Contains(str, ",,") {
-		return []string{}, fmt.Errorf("no consecutive commas")
- }
-
- var parenSegmentRecord = make([]segment, 0)
- var stack []int
-
-	// find the outermost parentheses segments (not nested inside other parentheses)
-	// illustration: "*****,(*****),*****" => ["*****", "(*****)", "*****"]
-	// on a left paren (, push its index onto the stack
-	// on a right paren ), pop one index off the stack
-	// if the pop empties the stack, a complete outermost segment "(******)" is found
- for index, chr := range str {
- if chr == '(' {
- stack = append(stack, index)
- } else if chr == ')' {
- if len(stack) == 0 {
- return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
- }
- leftParenIndex := stack[len(stack)-1]
- stack = stack[:len(stack)-1]
- if len(stack) == 0 {
- parenSegmentRecord = append(parenSegmentRecord, segment{
- left: leftParenIndex,
- right: index,
- })
- }
- }
- }
- if len(stack) != 0 {
- return []string{}, fmt.Errorf("unpaired parentheses: %s", str)
- }
-
- // take out tuple-formed type str in tuple argument
- strCopied := str
- for i := len(parenSegmentRecord) - 1; i >= 0; i-- {
- parenSeg := parenSegmentRecord[i]
- strCopied = strCopied[:parenSeg.left] + strCopied[parenSeg.right+1:]
- }
-
- // split the string without parenthesis segments
- tupleStrSegs := strings.Split(strCopied, ",")
-
- // the empty strings are placeholders for parenthesis segments
- // put the parenthesis segments back into segment list
- parenSegCount := 0
- for index, segStr := range tupleStrSegs {
- if segStr == "" {
- parenSeg := parenSegmentRecord[parenSegCount]
- tupleStrSegs[index] = str[parenSeg.left : parenSeg.right+1]
- parenSegCount++
- }
- }
-
- return tupleStrSegs, nil
-}
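This file is deleted wholesale; the assembler change further down swaps the import over to the external github.com/algorand/avm-abi/abi package, which now carries this logic. For readers of the patch, here is a minimal standalone sketch (hypothetical name, no input validation) of the outermost-comma split parseTupleContent performed, using a depth counter instead of the segment bookkeeping above:

```go
// splitTupleContent splits "uint64,(bool,(byte,address)),string" into
// ["uint64", "(bool,(byte,address))", "string"]: commas only count as
// separators when the parenthesis depth is zero.
func splitTupleContent(s string) []string {
	var out []string
	depth, start := 0, 0
	for i := 0; i < len(s); i++ {
		switch s[i] {
		case '(':
			depth++
		case ')':
			depth--
		case ',':
			if depth == 0 {
				out = append(out, s[start:i])
				start = i + 1
			}
		}
	}
	return append(out, s[start:])
}
```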
-
-// makeUintType makes `Uint` ABI type by taking a type bitSize argument.
-// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
-func makeUintType(typeSize int) (Type, error) {
- if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
- return Type{}, fmt.Errorf("unsupported uint type bitSize: %d", typeSize)
- }
- return Type{
- abiTypeID: Uint,
- bitSize: uint16(typeSize),
- }, nil
-}
-
-var (
- // byteType is ABI type constant for byte
- byteType = Type{abiTypeID: Byte}
-
- // boolType is ABI type constant for bool
- boolType = Type{abiTypeID: Bool}
-
- // addressType is ABI type constant for address
- addressType = Type{abiTypeID: Address}
-
- // stringType is ABI type constant for string
- stringType = Type{abiTypeID: String}
-)
-
-// makeUfixedType makes `UFixed` ABI type by taking type bitSize and type precision as arguments.
-// The range of type bitSize is [8, 512] and type bitSize % 8 == 0.
-// The range of type precision is [1, 160].
-func makeUfixedType(typeSize int, typePrecision int) (Type, error) {
- if typeSize%8 != 0 || typeSize < 8 || typeSize > 512 {
- return Type{}, fmt.Errorf("unsupported ufixed type bitSize: %d", typeSize)
- }
- if typePrecision > 160 || typePrecision < 1 {
- return Type{}, fmt.Errorf("unsupported ufixed type precision: %d", typePrecision)
- }
- return Type{
- abiTypeID: Ufixed,
- bitSize: uint16(typeSize),
- precision: uint16(typePrecision),
- }, nil
-}
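To make the ranges enforced by the two constructors concrete, a quick illustration (example code, not part of the diff) of accepted and rejected inputs:

```go
// Valid: bit sizes are multiples of 8 in [8, 512]; precision in [1, 160].
t, _ := makeUfixedType(128, 10)
fmt.Println(t.String()) // "ufixed128x10"

_, err := makeUintType(12) // 12 % 8 != 0
fmt.Println(err)           // unsupported uint type bitSize: 12

_, err = makeUfixedType(128, 161) // precision out of range
fmt.Println(err)                  // unsupported ufixed type precision: 161
```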
-
-// makeStaticArrayType makes static length array ABI type by taking
-// array element type and array length as arguments.
-func makeStaticArrayType(argumentType Type, arrayLength uint16) Type {
- return Type{
- abiTypeID: ArrayStatic,
- childTypes: []Type{argumentType},
- staticLength: arrayLength,
- }
-}
-
-// makeDynamicArrayType makes dynamic length array by taking array element type as argument.
-func makeDynamicArrayType(argumentType Type) Type {
- return Type{
- abiTypeID: ArrayDynamic,
- childTypes: []Type{argumentType},
- }
-}
-
-// MakeTupleType makes tuple ABI type by taking an array of tuple element types as argument.
-func MakeTupleType(argumentTypes []Type) (Type, error) {
- if len(argumentTypes) >= math.MaxUint16 {
- return Type{}, fmt.Errorf("tuple type child type number larger than maximum uint16 error")
- }
- return Type{
- abiTypeID: Tuple,
- childTypes: argumentTypes,
- staticLength: uint16(len(argumentTypes)),
- }, nil
-}
-
-// Equal method decides the equality of two types: t == t0.
-func (t Type) Equal(t0 Type) bool {
- if t.abiTypeID != t0.abiTypeID {
- return false
- }
- if t.precision != t0.precision || t.bitSize != t0.bitSize {
- return false
- }
- if t.staticLength != t0.staticLength {
- return false
- }
- if len(t.childTypes) != len(t0.childTypes) {
- return false
- }
- for i := 0; i < len(t.childTypes); i++ {
- if !t.childTypes[i].Equal(t0.childTypes[i]) {
- return false
- }
- }
-
- return true
-}
-
-// IsDynamic method decides if an ABI type is dynamic or static.
-func (t Type) IsDynamic() bool {
- switch t.abiTypeID {
- case ArrayDynamic, String:
- return true
- default:
- for _, childT := range t.childTypes {
- if childT.IsDynamic() {
- return true
- }
- }
- return false
- }
-}
-
-// findBoolLR assumes the type at the given index in typeList is an ABI bool.
-// It returns the distance from that index to the furthest consecutive bool type
-// in the direction given by delta (+1 searches forward, -1 backward).
-func findBoolLR(typeList []Type, index int, delta int) int {
- until := 0
- for {
- curr := index + delta*until
- if typeList[curr].abiTypeID == Bool {
- if curr != len(typeList)-1 && delta > 0 {
- until++
- } else if curr > 0 && delta < 0 {
- until++
- } else {
- break
- }
- } else {
- until--
- break
- }
- }
- return until
-}
-
-// ByteLen method calculates the byte length of a static ABI type.
-func (t Type) ByteLen() (int, error) {
- switch t.abiTypeID {
- case Address:
- return addressByteSize, nil
- case Byte:
- return singleByteSize, nil
- case Uint, Ufixed:
- return int(t.bitSize / 8), nil
- case Bool:
- return singleBoolSize, nil
- case ArrayStatic:
- if t.childTypes[0].abiTypeID == Bool {
- byteLen := int(t.staticLength+7) / 8
- return byteLen, nil
- }
- elemByteLen, err := t.childTypes[0].ByteLen()
- if err != nil {
- return -1, err
- }
- return int(t.staticLength) * elemByteLen, nil
- case Tuple:
- size := 0
- for i := 0; i < len(t.childTypes); i++ {
- if t.childTypes[i].abiTypeID == Bool {
- // search after bool
- after := findBoolLR(t.childTypes, i, 1)
- // shift the index
- i += after
- // get number of bool
- boolNum := after + 1
- size += (boolNum + 7) / 8
- } else {
- childByteSize, err := t.childTypes[i].ByteLen()
- if err != nil {
- return -1, err
- }
- size += childByteSize
- }
- }
- return size, nil
- default:
- return -1, fmt.Errorf("%s is a dynamic type", t.String())
- }
-}
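The Bool branches above implement bit packing: a run of consecutive bools inside a tuple (or a bool static array) occupies ceil(n/8) bytes rather than n. A worked sketch against the exported entry points of this (now removed) package:

```go
t, _ := TypeOf("(bool,bool,bool,uint64)")
n, _ := t.ByteLen()
fmt.Println(n) // 9: three consecutive bools share one byte, plus 8 for uint64

arr, _ := TypeOf("bool[10]")
n, _ = arr.ByteLen()
fmt.Println(n) // 2: (10 + 7) / 8

mixed, _ := TypeOf("(bool,byte,bool)")
n, _ = mixed.ByteLen()
fmt.Println(n) // 3: the bools are not adjacent, so no packing applies
```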
-
-// AnyTransactionType is the ABI argument type string for a nonspecific transaction argument
-const AnyTransactionType = "txn"
-
-// IsTransactionType checks if a type string represents a transaction type
-// argument, such as "txn", "pay", "keyreg", etc.
-func IsTransactionType(s string) bool {
- switch s {
- case AnyTransactionType, "pay", "keyreg", "acfg", "axfer", "afrz", "appl":
- return true
- default:
- return false
- }
-}
-
-// AccountReferenceType is the ABI argument type string for account references
-const AccountReferenceType = "account"
-
-// AssetReferenceType is the ABI argument type string for asset references
-const AssetReferenceType = "asset"
-
-// ApplicationReferenceType is the ABI argument type string for application references
-const ApplicationReferenceType = "application"
-
-// IsReferenceType checks if a type string represents a reference type argument,
-// such as "account", "asset", or "application".
-func IsReferenceType(s string) bool {
- switch s {
- case AccountReferenceType, AssetReferenceType, ApplicationReferenceType:
- return true
- default:
- return false
- }
-}
-
-// VoidReturnType is the ABI return type string for a method that does not return any value
-const VoidReturnType = "void"
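These predicates matter when parsing ARC-4 method signatures, where an argument string may name a transaction, a reference, or a plain ABI value type. A hypothetical classification loop:

```go
for _, arg := range []string{"pay", "asset", "uint64"} {
	switch {
	case IsTransactionType(arg):
		fmt.Println(arg, "=> transaction argument")
	case IsReferenceType(arg):
		fmt.Println(arg, "=> reference argument")
	default:
		// anything else must parse as a value type
		if _, err := TypeOf(arg); err == nil {
			fmt.Println(arg, "=> ABI value type")
		}
	}
}
```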
diff --git a/data/abi/abi_type_test.go b/data/abi/abi_type_test.go
deleted file mode 100644
index fb7c7e9025..0000000000
--- a/data/abi/abi_type_test.go
+++ /dev/null
@@ -1,613 +0,0 @@
-// Copyright (C) 2019-2022 Algorand, Inc.
-// This file is part of go-algorand
-//
-// go-algorand is free software: you can redistribute it and/or modify
-// it under the terms of the GNU Affero General Public License as
-// published by the Free Software Foundation, either version 3 of the
-// License, or (at your option) any later version.
-//
-// go-algorand is distributed in the hope that it will be useful,
-// but WITHOUT ANY WARRANTY; without even the implied warranty of
-// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-// GNU Affero General Public License for more details.
-//
-// You should have received a copy of the GNU Affero General Public License
-// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
-
-package abi
-
-import (
- "fmt"
- "math/rand"
- "strconv"
- "strings"
- "testing"
- "time"
-
- "github.com/algorand/go-algorand/test/partitiontest"
- "github.com/stretchr/testify/require"
-)
-
-func TestMakeTypeValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 8; i <= 512; i += 8 {
- uintType, err := makeUintType(i)
- require.NoError(t, err, "make uint type in valid space should not return error")
- expected := "uint" + strconv.Itoa(i)
- actual := uintType.String()
- require.Equal(t, expected, actual, "makeUintType: expected %s, actual %s", expected, actual)
- }
- // ufixed
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- ufixedType, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type in valid space should not return error")
- expected := "ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j)
- actual := ufixedType.String()
- require.Equal(t, expected, actual,
- "TypeOf ufixed error: expected %s, actual %s", expected, actual)
- }
- }
- // bool/strings/address/byte + dynamic/static array + tuple
- var testcases = []struct {
- input Type
- testType string
- expected string
- }{
- {input: boolType, testType: "bool", expected: "bool"},
- {input: stringType, testType: "string", expected: "string"},
- {input: addressType, testType: "address", expected: "address"},
- {input: byteType, testType: "byte", expected: "byte"},
- // dynamic array
- {
- input: makeDynamicArrayType(
- Type{
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- ),
- testType: "dynamic array",
- expected: "uint32[]",
- },
- {
- input: makeDynamicArrayType(
- makeDynamicArrayType(
- byteType,
- ),
- ),
- testType: "dynamic array",
- expected: "byte[][]",
- },
- {
- input: makeStaticArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(128),
- precision: uint16(10),
- },
- uint16(100),
- ),
- testType: "static array",
- expected: "ufixed128x10[100]",
- },
- {
- input: makeStaticArrayType(
- makeStaticArrayType(
- boolType,
- uint16(128),
- ),
- uint16(256),
- ),
- testType: "static array",
- expected: "bool[128][256]",
- },
- // tuple type
- {
- input: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 4,
- },
- makeDynamicArrayType(byteType),
- },
- staticLength: 3,
- },
- testType: "tuple type",
- expected: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
- },
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("MakeType test %s", testcase.testType), func(t *testing.T) {
- actual := testcase.input.String()
- require.Equal(t, testcase.expected, actual,
- "MakeType: expected %s, actual %s", testcase.expected, actual)
- })
- }
-}
-
-func TestMakeTypeInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 0; i <= 1000; i++ {
- randInput := rand.Uint32() % (1 << 16)
- for randInput%8 == 0 && randInput <= 512 && randInput >= 8 {
- randInput = rand.Uint32() % (1 << 16)
- }
-		// note: whether a value is divisible by 8 is the same whether it is viewed as uint32 or uint16.
- _, err := makeUintType(int(randInput))
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", uint16(randInput))
- }
- // ufixed
- for i := 0; i <= 10000; i++ {
- randSize := rand.Uint64() % (1 << 16)
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64() % (1 << 16)
- }
- randPrecision := rand.Uint32()
- for randPrecision >= 1 && randPrecision <= 160 {
- randPrecision = rand.Uint32()
- }
- _, err := makeUfixedType(int(randSize), int(randPrecision))
- require.Error(t, err, "makeUfixedType: should throw error on bitSize %d, precision %d", randSize, randPrecision)
- }
-}
-
-func TestTypeFromStringValid(t *testing.T) {
- partitiontest.PartitionTest(t)
- // uint
- for i := 8; i <= 512; i += 8 {
- expected, err := makeUintType(i)
- require.NoError(t, err, "make uint type in valid space should not return error")
- actual, err := TypeOf(expected.String())
- require.NoError(t, err, "TypeOf: uint parsing error: %s", expected.String())
- require.Equal(t, expected, actual,
- "TypeOf: expected %s, actual %s", expected.String(), actual.String())
- }
- // ufixed
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- expected, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type in valid space should not return error")
- actual, err := TypeOf("ufixed" + strconv.Itoa(i) + "x" + strconv.Itoa(j))
- require.NoError(t, err, "TypeOf ufixed parsing error: %s", expected.String())
- require.Equal(t, expected, actual,
- "TypeOf ufixed: expected %s, actual %s", expected.String(), actual.String())
- }
- }
- var testcases = []struct {
- input string
- testType string
- expected Type
- }{
- {input: boolType.String(), testType: "bool", expected: boolType},
- {input: stringType.String(), testType: "string", expected: stringType},
- {input: addressType.String(), testType: "address", expected: addressType},
- {input: byteType.String(), testType: "byte", expected: byteType},
- {
- input: "uint256[]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(Type{abiTypeID: Uint, bitSize: 256}),
- },
- {
- input: "ufixed256x64[]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: 256,
- precision: 64,
- },
- ),
- },
- {
- input: "byte[][][][]",
- testType: "dynamic array",
- expected: makeDynamicArrayType(
- makeDynamicArrayType(
- makeDynamicArrayType(
- makeDynamicArrayType(
- byteType,
- ),
- ),
- ),
- ),
- },
- // static array
- {
- input: "address[100]",
- testType: "static array",
- expected: makeStaticArrayType(
- addressType,
- uint16(100),
- ),
- },
- {
- input: "uint64[][200]",
- testType: "static array",
- expected: makeStaticArrayType(
- makeDynamicArrayType(
- Type{abiTypeID: Uint, bitSize: uint16(64)},
- ),
- uint16(200),
- ),
- },
- // tuple type
- {
- input: "()",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{},
- staticLength: 0,
- },
- },
- {
- input: "(uint32,(address,byte,bool[10],ufixed256x10[]),byte[])",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 4,
- },
- makeDynamicArrayType(byteType),
- },
- staticLength: 3,
- },
- },
- {
- input: "(uint32,(address,byte,bool[10],(ufixed256x10[])))",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 1,
- },
- },
- staticLength: 4,
- },
- },
- staticLength: 2,
- },
- },
- {
- input: "((uint32),(address,(byte,bool[10],ufixed256x10[])))",
- testType: "tuple type",
- expected: Type{
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- {
- abiTypeID: Uint,
- bitSize: uint16(32),
- },
- },
- staticLength: 1,
- },
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- addressType,
- {
- abiTypeID: Tuple,
- childTypes: []Type{
- byteType,
- makeStaticArrayType(boolType, uint16(10)),
- makeDynamicArrayType(
- Type{
- abiTypeID: Ufixed,
- bitSize: uint16(256),
- precision: uint16(10),
- },
- ),
- },
- staticLength: 3,
- },
- },
- staticLength: 2,
- },
- },
- staticLength: 2,
- },
- },
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeOf test %s", testcase.testType), func(t *testing.T) {
- actual, err := TypeOf(testcase.input)
- require.NoError(t, err, "TypeOf %s parsing error", testcase.testType)
- require.Equal(t, testcase.expected, actual, "TestFromString %s: expected %s, actual %s",
- testcase.testType, testcase.expected.String(), actual.String())
- })
- }
-}
-
-func TestTypeFromStringInvalid(t *testing.T) {
- partitiontest.PartitionTest(t)
- for i := 0; i <= 1000; i++ {
- randSize := rand.Uint64()
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64()
- }
- errorInput := "uint" + strconv.FormatUint(randSize, 10)
- _, err := TypeOf(errorInput)
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
- }
- for i := 0; i <= 10000; i++ {
- randSize := rand.Uint64()
- for randSize%8 == 0 && randSize <= 512 && randSize >= 8 {
- randSize = rand.Uint64()
- }
- randPrecision := rand.Uint64()
- for randPrecision >= 1 && randPrecision <= 160 {
- randPrecision = rand.Uint64()
- }
- errorInput := "ufixed" + strconv.FormatUint(randSize, 10) + "x" + strconv.FormatUint(randPrecision, 10)
- _, err := TypeOf(errorInput)
- require.Error(t, err, "makeUintType: should throw error on bitSize input %d", randSize)
- }
- var testcases = []string{
- // uint
- "uint123x345",
- "uint 128",
- "uint8 ",
- "uint!8",
- "uint[32]",
- "uint-893",
- "uint#120\\",
- // ufixed
- "ufixed000000000016x0000010",
- "ufixed123x345",
- "ufixed 128 x 100",
- "ufixed64x10 ",
- "ufixed!8x2 ",
- "ufixed[32]x16",
- "ufixed-64x+100",
- "ufixed16x+12",
- // dynamic array
- "uint256 []",
- "byte[] ",
- "[][][]",
- "stuff[]",
- // static array
- "ufixed32x10[0]",
- "byte[10 ]",
- "uint64[0x21]",
- // tuple
- "(ufixed128x10))",
- "(,uint128,byte[])",
- "(address,ufixed64x5,)",
- "(byte[16],somethingwrong)",
- "( )",
- "((uint32)",
- "(byte,,byte)",
- "((byte),,(byte))",
- }
- for _, testcase := range testcases {
- t.Run(fmt.Sprintf("TypeOf dynamic array test %s", testcase), func(t *testing.T) {
- _, err := TypeOf(testcase)
- require.Error(t, err, "%s should throw error", testcase)
- })
- }
-}
-
-func generateTupleType(baseTypes []Type, tupleTypes []Type) Type {
- if len(baseTypes) == 0 && len(tupleTypes) == 0 {
- panic("should not pass all nil arrays into generateTupleType")
- }
- tupleLen := 0
- for tupleLen == 0 {
- tupleLen = rand.Intn(20)
- }
- resultTypes := make([]Type, tupleLen)
- for i := 0; i < tupleLen; i++ {
- baseOrTuple := rand.Intn(5)
- if baseOrTuple == 1 && len(tupleTypes) > 0 {
- resultTypes[i] = tupleTypes[rand.Intn(len(tupleTypes))]
- } else {
- resultTypes[i] = baseTypes[rand.Intn(len(baseTypes))]
- }
- }
- return Type{abiTypeID: Tuple, childTypes: resultTypes, staticLength: uint16(tupleLen)}
-}
-
-func TestTypeMISC(t *testing.T) {
- partitiontest.PartitionTest(t)
- rand.Seed(time.Now().Unix())
-
- var testpool = []Type{
- boolType,
- addressType,
- stringType,
- byteType,
- }
- for i := 8; i <= 512; i += 8 {
- uintT, err := makeUintType(i)
- require.NoError(t, err, "make uint type error")
- testpool = append(testpool, uintT)
- }
- for i := 8; i <= 512; i += 8 {
- for j := 1; j <= 160; j++ {
- ufixedT, err := makeUfixedType(i, j)
- require.NoError(t, err, "make ufixed type error: bitSize %d, precision %d", i, j)
- testpool = append(testpool, ufixedT)
- }
- }
- for _, testcase := range testpool {
- testpool = append(testpool, makeDynamicArrayType(testcase))
- testpool = append(testpool, makeStaticArrayType(testcase, 10))
- testpool = append(testpool, makeStaticArrayType(testcase, 20))
- }
-
- for _, testcase := range testpool {
- require.True(t, testcase.Equal(testcase), "test type self equal error")
- }
- baseTestCount := 0
- for baseTestCount < 1000 {
- index0 := rand.Intn(len(testpool))
- index1 := rand.Intn(len(testpool))
- if index0 == index1 {
- continue
- }
- require.False(t, testpool[index0].Equal(testpool[index1]),
- "test type not equal error\n%s\n%s",
- testpool[index0].String(), testpool[index1].String())
- baseTestCount++
- }
-
- testpoolTuple := make([]Type, 0)
- for i := 0; i < 100; i++ {
- testpoolTuple = append(testpoolTuple, generateTupleType(testpool, testpoolTuple))
- }
- for _, testcaseTuple := range testpoolTuple {
- require.True(t, testcaseTuple.Equal(testcaseTuple), "test type tuple equal error")
- }
-
- tupleTestCount := 0
- for tupleTestCount < 100 {
- index0 := rand.Intn(len(testpoolTuple))
- index1 := rand.Intn(len(testpoolTuple))
- if testpoolTuple[index0].String() == testpoolTuple[index1].String() {
- continue
- }
- require.False(t, testpoolTuple[index0].Equal(testpoolTuple[index1]),
- "test type tuple not equal error\n%s\n%s",
- testpoolTuple[index0].String(), testpoolTuple[index1].String())
- tupleTestCount++
- }
-
- testpool = append(testpool, testpoolTuple...)
- isDynamicCount := 0
- for isDynamicCount < 100 {
- index := rand.Intn(len(testpool))
- isDynamicArr := strings.Contains(testpool[index].String(), "[]")
- isDynamicStr := strings.Contains(testpool[index].String(), "string")
- require.Equal(t, isDynamicArr || isDynamicStr, testpool[index].IsDynamic(),
- "test type isDynamic error\n%s", testpool[index].String())
- isDynamicCount++
- }
-
- addressByteLen, err := addressType.ByteLen()
- require.NoError(t, err, "address type bytelen should not return error")
- require.Equal(t, 32, addressByteLen, "address type bytelen should be 32")
- byteByteLen, err := byteType.ByteLen()
- require.NoError(t, err, "byte type bytelen should not return error")
- require.Equal(t, 1, byteByteLen, "byte type bytelen should be 1")
- boolByteLen, err := boolType.ByteLen()
- require.NoError(t, err, "bool type bytelen should be 1")
- require.Equal(t, 1, boolByteLen, "bool type bytelen should be 1")
-
- byteLenTestCount := 0
- for byteLenTestCount < 100 {
- index := rand.Intn(len(testpool))
- testType := testpool[index]
- byteLen, err := testType.ByteLen()
- if testType.IsDynamic() {
- require.Error(t, err, "byteLen test error on %s dynamic type, should have error",
- testType.String())
- } else {
- require.NoError(t, err, "byteLen test error on %s dynamic type, should not have error")
- if testType.abiTypeID == Tuple {
- sizeSum := 0
- for i := 0; i < len(testType.childTypes); i++ {
- if testType.childTypes[i].abiTypeID == Bool {
- // search previous bool
- before := findBoolLR(testType.childTypes, i, -1)
- // search after bool
- after := findBoolLR(testType.childTypes, i, 1)
- // append to heads and tails
- require.True(t, before%8 == 0, "expected tuple bool compact by 8")
- if after > 7 {
- after = 7
- }
- i += after
- sizeSum++
- } else {
- childByteSize, err := testType.childTypes[i].ByteLen()
- require.NoError(t, err, "byteLen not expected to fail on tuple child type")
- sizeSum += childByteSize
- }
- }
-
- require.Equal(t, sizeSum, byteLen,
- "%s do not match calculated byte length %d", testType.String(), sizeSum)
- } else if testType.abiTypeID == ArrayStatic {
- if testType.childTypes[0].abiTypeID == Bool {
- expected := testType.staticLength / 8
- if testType.staticLength%8 != 0 {
- expected++
- }
- actual, err := testType.ByteLen()
- require.NoError(t, err, "%s should not return error on byteLen test")
- require.Equal(t, int(expected), actual, "%s do not match calculated byte length %d",
- testType.String(), expected)
- } else {
- childSize, err := testType.childTypes[0].ByteLen()
- require.NoError(t, err, "%s should not return error on byteLen test", testType.childTypes[0].String())
- expected := childSize * int(testType.staticLength)
- require.Equal(t, expected, byteLen,
- "%s do not match calculated byte length %d", testType.String(), expected)
- }
- }
- }
- byteLenTestCount++
- }
-}
diff --git a/data/account/participationRegistry.go b/data/account/participationRegistry.go
index e36f224516..e1c82a8924 100644
--- a/data/account/participationRegistry.go
+++ b/data/account/participationRegistry.go
@@ -220,6 +220,9 @@ var ErrNoKeyForID = errors.New("no valid key found for the participationID")
// ErrSecretNotFound is used when attempting to lookup secrets for a particular round.
var ErrSecretNotFound = errors.New("the participation ID did not have secrets for the requested round")
+// ErrStateProofVerifierNotFound states that no state proof field was found.
+var ErrStateProofVerifierNotFound = errors.New("record contains no StateProofVerifier")
+
// ParticipationRegistry contain all functions for interacting with the Participation Registry.
type ParticipationRegistry interface {
// Insert adds a record to storage and computes the ParticipationID
@@ -767,6 +770,10 @@ func (db *participationDB) GetStateProofSecretsForRound(id ParticipationID, roun
if err != nil {
return StateProofSecretsForRound{}, err
}
+ if partRecord.StateProof == nil {
+ return StateProofSecretsForRound{},
+ fmt.Errorf("%w: for participation ID %v", ErrStateProofVerifierNotFound, id)
+ }
var result StateProofSecretsForRound
result.ParticipationRecord = partRecord.ParticipationRecord
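Because the new error wraps ErrStateProofVerifierNotFound via %w, callers can detect this case with errors.Is instead of string matching. A caller-side sketch (hypothetical surrounding function):

```go
secrets, err := registry.GetStateProofSecretsForRound(id, rnd)
if errors.Is(err, account.ErrStateProofVerifierNotFound) {
	// The record exists but carries no state proof verifier; treat this
	// as "nothing to sign" rather than a hard failure.
	return nil
}
if err != nil {
	return err
}
use(secrets) // hypothetical consumer
```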
diff --git a/data/account/participationRegistry_test.go b/data/account/participationRegistry_test.go
index 8e02ff4e43..286edf117b 100644
--- a/data/account/participationRegistry_test.go
+++ b/data/account/participationRegistry_test.go
@@ -27,10 +27,9 @@ import (
"os"
"path/filepath"
"strconv"
- "sync/atomic"
-
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -958,6 +957,36 @@ func TestAddStateProofKeys(t *testing.T) {
}
}
+func TestGetRoundSecretsWithNilStateProofVerifier(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+ registry, dbfile := getRegistry(t)
+ defer registryCloseTest(t, registry, dbfile)
+
+ access, err := db.MakeAccessor("stateprooftest", false, true)
+ if err != nil {
+ panic(err)
+ }
+ root, err := GenerateRoot(access)
+	a.NoError(err)
+	p, err := FillDBWithParticipationKeys(access, root.Address(), 0, basics.Round(stateProofIntervalForTests*2), 3)
+ access.Close()
+ a.NoError(err)
+
+ // Install a key for testing
+ id, err := registry.Insert(p.Participation)
+ a.NoError(err)
+
+	// ensure that GetStateProofSecretsForRound reads from the cache a participationRecord whose StateProof field is nil.
+ prt := registry.cache[id]
+ prt.StateProof = nil
+ registry.cache[id] = prt
+
+ a.NoError(registry.Flush(defaultTimeout))
+
+ _, err = registry.GetStateProofSecretsForRound(id, basics.Round(stateProofIntervalForTests)-1)
+ a.ErrorIs(err, ErrStateProofVerifierNotFound)
+}
+
func TestSecretNotFound(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
diff --git a/data/account/registeryDbOps.go b/data/account/registeryDbOps.go
index 6fa69bb153..282008eb76 100644
--- a/data/account/registeryDbOps.go
+++ b/data/account/registeryDbOps.go
@@ -21,9 +21,10 @@ import (
"database/sql"
"errors"
"fmt"
+ "strings"
+
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/protocol"
- "strings"
)
type dbOp interface {
@@ -168,11 +169,7 @@ func (i *insertOp) apply(db *participationDB) (err error) {
// Create Rolling entry
result, err = tx.Exec(insertRollingQuery, pk, rawVoting)
- if err = verifyExecWithOneRowEffected(err, result, "insert rolling"); err != nil {
- return err
- }
-
- return nil
+ return verifyExecWithOneRowEffected(err, result, "insert rolling")
})
return err
}
diff --git a/data/accountManager.go b/data/accountManager.go
index d44091f806..aa5064e093 100644
--- a/data/accountManager.go
+++ b/data/accountManager.go
@@ -79,10 +79,10 @@ func (manager *AccountManager) Keys(rnd basics.Round) (out []account.Participati
// StateProofKeys returns a list of Participation accounts, and their stateproof secrets
func (manager *AccountManager) StateProofKeys(rnd basics.Round) (out []account.StateProofSecretsForRound) {
for _, part := range manager.registry.GetAll() {
- if part.OverlapsInterval(rnd, rnd) {
+ if part.StateProof != nil && part.OverlapsInterval(rnd, rnd) {
partRndSecrets, err := manager.registry.GetStateProofSecretsForRound(part.ParticipationID, rnd)
if err != nil {
- manager.log.Errorf("error while loading round secrets from participation registry: %w", err)
+ manager.log.Errorf("error while loading round secrets from participation registry: %v", err)
continue
}
out = append(out, partRndSecrets)
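The verb change here is deliberate: %w is only meaningful to fmt.Errorf, where it records the wrapped error for errors.Is and errors.As; in a logger's format string it is just an unknown verb and renders as %!w(...). A sketch of the distinction:

```go
err := fmt.Errorf("loading round secrets: %w", account.ErrStateProofVerifierNotFound)
fmt.Println(errors.Is(err, account.ErrStateProofVerifierNotFound)) // true

log.Errorf("loading round secrets: %v", err) // %v formats the chain; %w would print as %!w(...)
```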
diff --git a/data/accountManager_test.go b/data/accountManager_test.go
index 9d464cba17..1fcfe56bf7 100644
--- a/data/accountManager_test.go
+++ b/data/accountManager_test.go
@@ -17,6 +17,7 @@
package data
import (
+ "bytes"
"fmt"
"os"
"path/filepath"
@@ -248,3 +249,46 @@ func TestAccountManagerOverlappingStateProofKeys(t *testing.T) {
res = acctManager.StateProofKeys(basics.Round(merklesignature.KeyLifetimeDefault * 3))
a.Equal(1, len(res))
}
+
+func TestGetStateProofKeysDontLogErrorOnNilStateProof(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ a := assert.New(t)
+
+ registry, dbName := getRegistryImpl(t, false, true)
+ defer registryCloseTest(t, registry, dbName)
+
+ log := logging.TestingLog(t)
+ log.SetLevel(logging.Error)
+ logbuffer := bytes.NewBuffer(nil)
+ log.SetOutput(logbuffer)
+
+ acctManager := MakeAccountManager(log, registry)
+ databaseFiles := make([]string, 0)
+ defer func() {
+ for _, fileName := range databaseFiles {
+ os.Remove(fileName)
+ os.Remove(fileName + "-shm")
+ os.Remove(fileName + "-wal")
+ os.Remove(fileName + "-journal")
+ }
+ }()
+
+ // Generate 2 participations under the same account
+ store, err := db.MakeAccessor("stateprooftest", false, true)
+ a.NoError(err)
+ root, err := account.GenerateRoot(store)
+ a.NoError(err)
+ part1, err := account.FillDBWithParticipationKeys(store, root.Address(), 0, basics.Round(merklesignature.KeyLifetimeDefault*2), 3)
+ a.NoError(err)
+ store.Close()
+
+ part1.StateProofSecrets = nil
+ _, err = registry.Insert(part1.Participation)
+ a.NoError(err)
+
+ logbuffer.Reset()
+ acctManager.StateProofKeys(1)
+ lg := logbuffer.String()
+ a.False(strings.Contains(lg, account.ErrStateProofVerifierNotFound.Error()))
+ a.False(strings.Contains(lg, "level=error"), "expected no error in log:", lg)
+}
diff --git a/data/basics/address.go b/data/basics/address.go
index 5eed1c5121..412b7bf75b 100644
--- a/data/basics/address.go
+++ b/data/basics/address.go
@@ -24,23 +24,6 @@ import (
"github.com/algorand/go-algorand/crypto"
)
-// NOTE: Another (partial) implementation of `basics.Address` is in `data/abi`.
-// The reason of not using this `Address` in `data/abi` is that:
-// - `data/basics` has C dependencies (`go-algorand/crypto`)
-// - `go-algorand-sdk` has dependency to `go-algorand` for `ABI`
-// - if `go-algorand`'s ABI uses `basics.Address`, then it would be
-// impossible to up the version of `go-algorand` in `go-algorand-sdk`
-
-// This is discussed in:
-// - ISSUE https://github.com/algorand/go-algorand/issues/3355
-// - PR https://github.com/algorand/go-algorand/pull/3375
-
-// There are two solutions:
-// - One is to refactoring `crypto.Digest`, `crypto.Hash` and `basics.Address`
-// into packages that does not need `libsodium` crypto dependency
-// - The other is wrapping `libsodium` in a driver interface to make crypto
-// package importable (even if `libsodium` does not exist)
-
type (
// Address is a unique identifier corresponding to ownership of money
Address crypto.Digest
diff --git a/data/bookkeeping/genesis.go b/data/bookkeeping/genesis.go
index 114bb37f8c..7f01519e84 100644
--- a/data/bookkeeping/genesis.go
+++ b/data/bookkeeping/genesis.go
@@ -18,7 +18,7 @@ package bookkeeping
import (
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/algorand/go-algorand/config"
@@ -86,7 +86,7 @@ type Genesis struct {
// LoadGenesisFromFile attempts to load a Genesis structure from a (presumably) genesis.json file.
func LoadGenesisFromFile(genesisFile string) (genesis Genesis, err error) {
// Load genesis.json
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return
}
diff --git a/data/bookkeeping/txn_merkle_test.go b/data/bookkeeping/txn_merkle_test.go
index 30a34ab7f2..4ead543dae 100644
--- a/data/bookkeeping/txn_merkle_test.go
+++ b/data/bookkeeping/txn_merkle_test.go
@@ -162,6 +162,7 @@ func BenchmarkTxnRoots(b *testing.B) {
crypto.RandBytes(txn.PaymentTxnFields.Receiver[:])
sigtxn := transactions.SignedTxn{Txn: txn}
+ crypto.RandBytes(sigtxn.Sig[:])
ad := transactions.ApplyData{}
stib, err := blk.BlockHeader.EncodeSignedTxn(sigtxn, ad)
@@ -173,7 +174,7 @@ func BenchmarkTxnRoots(b *testing.B) {
break
}
}
-
+ b.Logf("Made block with %d transactions and %d txn bytes", len(blk.Payset), len(protocol.Encode(blk.Payset)))
var r crypto.Digest
b.Run("FlatCommit", func(b *testing.B) {
@@ -192,6 +193,14 @@ func BenchmarkTxnRoots(b *testing.B) {
}
})
+ b.Run("SHA256MerkleCommit", func(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var err error
+ r, err = blk.paysetCommitSHA256()
+ require.NoError(b, err)
+ }
+ })
+
_ = r
}
diff --git a/data/ledger.go b/data/ledger.go
index 8fc03cb6eb..101da721af 100644
--- a/data/ledger.go
+++ b/data/ledger.go
@@ -184,7 +184,7 @@ func (l *Ledger) Circulation(r basics.Round) (basics.MicroAlgos, error) {
}
}
- totals, err := l.OnlineTotals(r) //nolint:typecheck
+ totals, err := l.OnlineTotals(r)
if err != nil {
return basics.MicroAlgos{}, err
}
diff --git a/data/pools/transactionPool.go b/data/pools/transactionPool.go
index d5efc6c894..d5df868119 100644
--- a/data/pools/transactionPool.go
+++ b/data/pools/transactionPool.go
@@ -859,15 +859,13 @@ func (pool *TransactionPool) AssembleBlock(round basics.Round, deadline time.Tim
}
}
stats.TotalLength += uint64(encodedLen)
- stats.StateProofNextRound = uint64(assembled.Block().StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
if txib.Txn.Type == protocol.StateProofTx {
stats.StateProofStats = pool.getStateProofStats(&txib, encodedLen)
}
}
-
stats.AverageFee = totalFees / uint64(stats.IncludedCount)
}
-
+ stats.StateProofNextRound = uint64(assembled.Block().StateProofTracking[protocol.StateProofBasic].StateProofNextRound)
var details struct {
Round uint64
}
diff --git a/data/pools/transactionPool_test.go b/data/pools/transactionPool_test.go
index 7dcc4c6ba7..acbc5a9bc1 100644
--- a/data/pools/transactionPool_test.go
+++ b/data/pools/transactionPool_test.go
@@ -21,6 +21,9 @@ import (
"bytes"
"fmt"
"math/rand"
+ "os"
+ "runtime"
+ "runtime/pprof"
"strings"
"testing"
"time"
@@ -1099,6 +1102,110 @@ func BenchmarkTransactionPoolPending(b *testing.B) {
}
}
+// BenchmarkTransactionPoolRecompute attempts to build a transaction pool of 3x block size
+// and then calls recomputeBlockEvaluator, to update the pool given the just-committed txns.
+// It repeats this process b.N times.
+func BenchmarkTransactionPoolRecompute(b *testing.B) {
+ b.Log("Running with b.N", b.N)
+ poolSize := 100000
+ numOfAccounts := 100
+ numTransactions := 75000
+ blockTxnCount := 25000
+
+ myVersion := protocol.ConsensusVersion("test-large-blocks")
+ myProto := config.Consensus[protocol.ConsensusCurrentVersion]
+ if myProto.MaxTxnBytesPerBlock != 5*1024*1024 {
+ b.FailNow() // intended to use with 5MB blocks
+ }
+ config.Consensus[myVersion] = myProto
+
+ // Generate accounts
+ secrets := make([]*crypto.SignatureSecrets, numOfAccounts)
+ addresses := make([]basics.Address, numOfAccounts)
+
+ for i := 0; i < numOfAccounts; i++ {
+ secret := keypair()
+ addr := basics.Address(secret.SignatureVerifier)
+ secrets[i] = secret
+ addresses[i] = addr
+ }
+
+ l := mockLedger(b, initAccFixed(addresses, 1<<50), myVersion)
+ cfg := config.GetDefaultLocal()
+ cfg.TxPoolSize = poolSize
+ cfg.EnableProcessBlockStats = false
+
+ setupPool := func() (*TransactionPool, map[transactions.Txid]ledgercore.IncludedTransactions, uint) {
+ transactionPool := MakeTransactionPool(l, cfg, logging.Base())
+
+ // make some transactions
+ var signedTransactions []transactions.SignedTxn
+ for i := 0; i < numTransactions; i++ {
+ tx := transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: addresses[i%numOfAccounts],
+ Fee: basics.MicroAlgos{Raw: 20000 + proto.MinTxnFee},
+ FirstValid: 0,
+ LastValid: basics.Round(proto.MaxTxnLife),
+ GenesisHash: l.GenesisHash(),
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addresses[rand.Intn(numOfAccounts)],
+ Amount: basics.MicroAlgos{Raw: proto.MinBalance + uint64(rand.Intn(1<<32))},
+ },
+ }
+
+ signedTx := tx.Sign(secrets[i%numOfAccounts])
+ signedTransactions = append(signedTransactions, signedTx)
+ require.NoError(b, transactionPool.RememberOne(signedTx))
+ }
+
+ // make args for recomputeBlockEvaluator() like OnNewBlock() would
+ var knownCommitted uint
+ committedTxIds := make(map[transactions.Txid]ledgercore.IncludedTransactions)
+ for i := 0; i < blockTxnCount; i++ {
+ knownCommitted++
+ // OK to use empty IncludedTransactions: recomputeBlockEvaluator is only checking map membership
+ committedTxIds[signedTransactions[i].ID()] = ledgercore.IncludedTransactions{}
+ }
+ b.Logf("Made transactionPool with %d signedTransactions, %d committedTxIds, %d knownCommitted",
+ len(signedTransactions), len(committedTxIds), knownCommitted)
+ b.Logf("transactionPool pendingTxGroups %d rememberedTxGroups %d",
+ len(transactionPool.pendingTxGroups), len(transactionPool.rememberedTxGroups))
+ return transactionPool, committedTxIds, knownCommitted
+ }
+
+ transactionPool := make([]*TransactionPool, b.N)
+ committedTxIds := make([]map[transactions.Txid]ledgercore.IncludedTransactions, b.N)
+ knownCommitted := make([]uint, b.N)
+ for i := 0; i < b.N; i++ {
+ transactionPool[i], committedTxIds[i], knownCommitted[i] = setupPool()
+ }
+ time.Sleep(time.Second)
+ runtime.GC()
+ // CPU profiler if CPUPROFILE set
+ var profF *os.File
+ if os.Getenv("CPUPROFILE") != "" {
+ var err error
+ profF, err = os.Create(fmt.Sprintf("recomputePool-%d-%d.prof", b.N, crypto.RandUint64()))
+ require.NoError(b, err)
+ }
+
+ // call recomputeBlockEvaluator
+ if profF != nil {
+ pprof.StartCPUProfile(profF)
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ transactionPool[i].recomputeBlockEvaluator(committedTxIds[i], knownCommitted[i])
+ }
+ b.StopTimer()
+ if profF != nil {
+ pprof.StopCPUProfile()
+ }
+}
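A usage note for the profiling hook above: the benchmark writes its own profile file, so something like

```
CPUPROFILE=1 go test ./data/pools -run=NONE -bench=BenchmarkTransactionPoolRecompute
go tool pprof recomputePool-*.prof
```

should produce and open a CPU profile (the file name embeds b.N and a random suffix, as in the os.Create call above).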
+
func BenchmarkTransactionPoolSteadyState(b *testing.B) {
poolSize := 100000
@@ -1432,11 +1539,17 @@ func TestStateProofLogging(t *testing.T) {
lines = append(lines, scanner.Text())
}
fmt.Println(lines[len(lines)-1])
+	// Verify that StateProofNextRound is logged even when the block contains no transactions
+ var int1, nextRound uint64
+ var str1 string
+ partsNext := strings.Split(lines[len(lines)-10], "TransactionsLoopStartTime:")
+ fmt.Sscanf(partsNext[1], "%d, StateProofNextRound:%d, %s", &int1, &nextRound, &str1)
+ require.Equal(t, int(512), int(nextRound))
+
parts := strings.Split(lines[len(lines)-1], "StateProofNextRound:")
// Verify the Metrics is correct
- var nextRound, pWeight, signedWeight, numReveals, posToReveal, txnSize uint64
- var str1 string
+ var pWeight, signedWeight, numReveals, posToReveal, txnSize uint64
fmt.Sscanf(parts[1], "%d, ProvenWeight:%d, SignedWeight:%d, NumReveals:%d, NumPosToReveal:%d, TxnSize:%d\"%s",
&nextRound, &pWeight, &signedWeight, &numReveals, &posToReveal, &txnSize, &str1)
require.Equal(t, uint64(768), nextRound)
diff --git a/data/transactions/logic/README.md b/data/transactions/logic/README.md
index 0494971b3b..2fda903c98 100644
--- a/data/transactions/logic/README.md
+++ b/data/transactions/logic/README.md
@@ -594,6 +594,7 @@ Account fields used in the `acct_params_get` opcode.
| `assert` | immediately fail unless A is a non-zero number |
| `callsub target` | branch unconditionally to TARGET, saving the next instruction on the call stack |
| `retsub` | pop the top instruction from the call stack and branch to it |
+| `switch target ...` | branch to the Ath label. Continue at following instruction if index A exceeds the number of labels. |
### State Access
@@ -615,7 +616,7 @@ Account fields used in the `acct_params_get` opcode.
| `app_params_get f` | X is field F from app A. Y is 1 if A exists, else 0 |
| `acct_params_get f` | X is field F from account A. Y is 1 if A owns positive algos, else 0 |
| `log` | write A to log state of the current application |
-| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive) |
+| `block f` | field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive) |
### Inner Transactions
diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md
index 5fbd310d2a..05cb20b96d 100644
--- a/data/transactions/logic/TEAL_opcodes.md
+++ b/data/transactions/logic/TEAL_opcodes.md
@@ -1053,6 +1053,13 @@ The call stack is separate from the data stack. Only `callsub` and `retsub` mani
The call stack is separate from the data stack. Only `callsub` and `retsub` manipulate it.
+## switch target ...
+
+- Opcode: 0x8a {uint8 branch count} [{int16 branch offset, big-endian}, ...]
+- Stack: ..., A: uint64 → ...
+- branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.
+- Availability: v8
+
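A hypothetical decoder for the immediate layout above, assuming (as the assembler change below encodes via opEndPos) that each offset is a big-endian int16 relative to the first byte after the whole instruction:

```go
// switchTarget returns the pc to continue at after `switch` pops A.
func switchTarget(prog []byte, pc int, a uint64) int {
	count := int(prog[pc+1])
	end := pc + 2 + 2*count // pc of the instruction following switch
	if a >= uint64(count) {
		return end // index beyond the labels: fall through
	}
	off := int16(binary.BigEndian.Uint16(prog[pc+2+2*int(a):]))
	return end + int(off) // negative offsets (backjumps) are allowed
}
```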
## shl
- Opcode: 0x90
@@ -1401,7 +1408,7 @@ The notation A,B indicates that A and B are interpreted as a uint128 value, with
- Opcode: 0xd1 {uint8 block field}
- Stack: ..., A: uint64 → ..., any
-- field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)
+- field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)
- Availability: v7
`block` Fields:
diff --git a/data/transactions/logic/assembler.go b/data/transactions/logic/assembler.go
index e175a17034..47d5da1a15 100644
--- a/data/transactions/logic/assembler.go
+++ b/data/transactions/logic/assembler.go
@@ -27,11 +27,12 @@ import (
"errors"
"fmt"
"io"
+ "math"
"sort"
"strconv"
"strings"
- "github.com/algorand/go-algorand/data/abi"
+ "github.com/algorand/avm-abi/abi"
"github.com/algorand/go-algorand/data/basics"
)
@@ -48,10 +49,13 @@ type Writer interface {
type labelReference struct {
sourceLine int
- // position of the opcode start that refers to the label
+ // position of the label reference
position int
label string
+
+	// ending position of the opcode containing the label reference.
+ offsetPosition int
}
type constReference interface {
@@ -224,11 +228,13 @@ type OpStream struct {
intc []uint64 // observed ints in code. We'll put them into a intcblock
intcRefs []intReference // references to int pseudo-op constants, used for optimization
- hasIntcBlock bool // prevent prepending intcblock because asm has one
+ cntIntcBlock int // prevent prepending intcblock because asm has one
+ hasPseudoInt bool // were any `int` pseudo ops used?
bytec [][]byte // observed bytes in code. We'll put them into a bytecblock
bytecRefs []byteReference // references to byte/addr pseudo-op constants, used for optimization
- hasBytecBlock bool // prevent prepending bytecblock because asm has one
+ cntBytecBlock int // prevent prepending bytecblock because asm has one
+ hasPseudoByte bool // were any `byte` (or equivalent) pseudo ops used?
// tracks information we know to be true at the point being assembled
known ProgramKnowledge
@@ -344,8 +350,8 @@ func (ops *OpStream) recordSourceLine() {
}
// referToLabel records an opcode label reference to resolve later
-func (ops *OpStream) referToLabel(pc int, label string) {
- ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label})
+func (ops *OpStream) referToLabel(pc int, label string, offsetPosition int) {
+ ops.labelReferences = append(ops.labelReferences, labelReference{ops.sourceLine, pc, label, offsetPosition})
}
type refineFunc func(pgm *ProgramKnowledge, immediates []string) (StackTypes, StackTypes)
@@ -373,18 +379,18 @@ func (ops *OpStream) returns(spec *OpSpec, replacement StackType) {
func (ops *OpStream) Intc(constIndex uint) {
switch constIndex {
case 0:
- ops.pending.WriteByte(0x22) // intc_0
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_0"].Opcode)
case 1:
- ops.pending.WriteByte(0x23) // intc_1
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_1"].Opcode)
case 2:
- ops.pending.WriteByte(0x24) // intc_2
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_2"].Opcode)
case 3:
- ops.pending.WriteByte(0x25) // intc_3
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc_3"].Opcode)
default:
if constIndex > 0xff {
ops.error("cannot have more than 256 int constants")
}
- ops.pending.WriteByte(0x21) // intc
+ ops.pending.WriteByte(OpsByName[ops.Version]["intc"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.intc)) {
@@ -394,8 +400,10 @@ func (ops *OpStream) Intc(constIndex uint) {
}
}
-// Uint writes opcodes for loading a uint literal
-func (ops *OpStream) Uint(val uint64) {
+// IntLiteral writes opcodes for loading a uint literal
+func (ops *OpStream) IntLiteral(val uint64) {
+ ops.hasPseudoInt = true
+
found := false
var constIndex uint
for i, cv := range ops.intc {
@@ -405,7 +413,11 @@ func (ops *OpStream) Uint(val uint64) {
break
}
}
+
if !found {
+ if ops.cntIntcBlock > 0 {
+ ops.errorf("int %d used without %d in intcblock", val, val)
+ }
constIndex = uint(len(ops.intc))
ops.intc = append(ops.intc, val)
}
@@ -420,18 +432,18 @@ func (ops *OpStream) Uint(val uint64) {
func (ops *OpStream) Bytec(constIndex uint) {
switch constIndex {
case 0:
- ops.pending.WriteByte(0x28) // bytec_0
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_0"].Opcode)
case 1:
- ops.pending.WriteByte(0x29) // bytec_1
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_1"].Opcode)
case 2:
- ops.pending.WriteByte(0x2a) // bytec_2
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_2"].Opcode)
case 3:
- ops.pending.WriteByte(0x2b) // bytec_3
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec_3"].Opcode)
default:
if constIndex > 0xff {
ops.error("cannot have more than 256 byte constants")
}
- ops.pending.WriteByte(0x27) // bytec
+ ops.pending.WriteByte(OpsByName[ops.Version]["bytec"].Opcode)
ops.pending.WriteByte(uint8(constIndex))
}
if constIndex >= uint(len(ops.bytec)) {
@@ -444,6 +456,8 @@ func (ops *OpStream) Bytec(constIndex uint) {
// ByteLiteral writes opcodes and data for loading a []byte literal
// Values are accumulated so that they can be put into a bytecblock
func (ops *OpStream) ByteLiteral(val []byte) {
+ ops.hasPseudoByte = true
+
found := false
var constIndex uint
for i, cv := range ops.bytec {
@@ -454,6 +468,9 @@ func (ops *OpStream) ByteLiteral(val []byte) {
}
}
if !found {
+ if ops.cntBytecBlock > 0 {
+ ops.errorf("byte/addr/method used without value in bytecblock")
+ }
constIndex = uint(len(ops.bytec))
ops.bytec = append(ops.bytec, val)
}
@@ -468,23 +485,46 @@ func asmInt(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) != 1 {
return ops.error("int needs one argument")
}
+
+ // After backBranchEnabledVersion, control flow is confusing, so if there's
+ // a manual cblock, use push instead of trying to use what's given.
+ if ops.cntIntcBlock > 0 && ops.Version >= backBranchEnabledVersion {
+ // We don't understand control-flow, so use pushint
+ ops.warnf("int %s used with explicit intcblock. must pushint", args[0])
+ pushint := OpsByName[ops.Version]["pushint"]
+ return asmPushInt(ops, &pushint, args)
+ }
+
+ // There are no backjumps, but there are multiple cblocks. Maybe one is
+ // conditional skipped. Too confusing.
+ if ops.cntIntcBlock > 1 {
+ pushint, ok := OpsByName[ops.Version]["pushint"]
+ if ok {
+ return asmPushInt(ops, &pushint, args)
+ }
+ return ops.errorf("int %s used with manual intcblocks. Use intc.", args[0])
+ }
+
+ // In both of the above clauses, we _could_ track whether a particular
+ // intcblock dominates the current instruction. If so, we could use it.
+
// check txn type constants
i, ok := txnTypeMap[args[0]]
if ok {
- ops.Uint(i)
+ ops.IntLiteral(i)
return nil
}
- // check OnCompetion constants
+ // check OnCompletion constants
oc, isOCStr := onCompletionMap[args[0]]
if isOCStr {
- ops.Uint(oc)
+ ops.IntLiteral(oc)
return nil
}
val, err := strconv.ParseUint(args[0], 0, 64)
if err != nil {
return ops.error(err)
}
- ops.Uint(val)
+ ops.IntLiteral(val)
return nil
}
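To make the two fallbacks above concrete, a hypothetical program assembled through the package's AssembleString helper:

```go
// With a manual intcblock present (and backjumps possible in v4+), `int`
// can no longer safely reuse or extend the block, so it assembles as
// pushint and emits a warning; explicit intc references still work.
ops, err := AssembleString("#pragma version 8\n" +
	"intcblock 1 2\n" +
	"int 3\n" + // becomes pushint 3, with a warning
	"intc_1\n") // still the manual block's value, 2
_ = ops
_ = err
```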
@@ -545,7 +585,7 @@ func asmPushBytes(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
-func base32DecdodeAnyPadding(x string) (val []byte, err error) {
+func base32DecodeAnyPadding(x string) (val []byte, err error) {
val, err = base32.StdEncoding.WithPadding(base32.NoPadding).DecodeString(x)
if err != nil {
// try again with standard padding
@@ -567,7 +607,7 @@ func parseBinaryArgs(args []string) (val []byte, consumed int, err error) {
err = errors.New("byte base32 arg lacks close paren")
return
}
- val, err = base32DecdodeAnyPadding(arg[open+1 : close])
+ val, err = base32DecodeAnyPadding(arg[open+1 : close])
if err != nil {
return
}
@@ -595,7 +635,7 @@ func parseBinaryArgs(args []string) (val []byte, consumed int, err error) {
err = fmt.Errorf("need literal after 'byte %s'", arg)
return
}
- val, err = base32DecdodeAnyPadding(args[1])
+ val, err = base32DecodeAnyPadding(args[1])
if err != nil {
return
}
@@ -696,6 +736,29 @@ func asmByte(ops *OpStream, spec *OpSpec, args []string) error {
if len(args) == 0 {
return ops.errorf("%s operation needs byte literal argument", spec.Name)
}
+
+ // After backBranchEnabledVersion, control flow is confusing, so if there's
+ // a manual cblock, use push instead of trying to use what's given.
+ if ops.cntBytecBlock > 0 && ops.Version >= backBranchEnabledVersion {
+ // We don't understand control-flow, so use pushbytes
+ ops.warnf("byte %s used with explicit bytecblock. must pushbytes", args[0])
+ pushbytes := OpsByName[ops.Version]["pushbytes"]
+ return asmPushBytes(ops, &pushbytes, args)
+ }
+
+ // There are no backjumps, but there are multiple cblocks. Maybe one is
+ // conditional skipped. Too confusing.
+ if ops.cntBytecBlock > 1 {
+ pushbytes, ok := OpsByName[ops.Version]["pushbytes"]
+ if ok {
+ return asmPushBytes(ops, &pushbytes, args)
+ }
+ return ops.errorf("byte %s used with manual bytecblocks. Use bytec.", args[0])
+ }
+
+ // In both of the above clauses, we _could_ track whether a particular
+ // bytecblock dominates the current instruction. If so, we could use it.
+
val, consumed, err := parseBinaryArgs(args)
if err != nil {
return ops.error(err)
@@ -723,7 +786,7 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
if err != nil {
// Warn if an invalid signature is used. Don't return an error, since the ABI is not
// governed by the core protocol, so there may be changes to it that we don't know about
- ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error()) // nolint:errcheck
+ ops.warnf("Invalid ARC-4 ABI method signature for method op: %s", err.Error())
}
hash := sha512.Sum512_256(methodSig)
ops.ByteLiteral(hash[0:4])
@@ -734,11 +797,10 @@ func asmMethod(ops *OpStream, spec *OpSpec, args []string) error {
func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.WriteByte(spec.Opcode)
+ ivals := make([]uint64, len(args))
var scratch [binary.MaxVarintLen64]byte
l := binary.PutUvarint(scratch[:], uint64(len(args)))
ops.pending.Write(scratch[:l])
- ops.intcRefs = nil
- ops.intc = make([]uint64, len(args))
for i, xs := range args {
cu, err := strconv.ParseUint(xs, 0, 64)
if err != nil {
@@ -746,9 +808,21 @@ func asmIntCBlock(ops *OpStream, spec *OpSpec, args []string) error {
}
l = binary.PutUvarint(scratch[:], cu)
ops.pending.Write(scratch[:l])
- ops.intc[i] = cu
+ if !ops.known.deadcode {
+ ivals[i] = cu
+ }
}
- ops.hasIntcBlock = true
+ if !ops.known.deadcode {
+ // If we previously processed an `int`, we thought we could insert our
+ // own intcblock, but now we see a manual one.
+ if ops.hasPseudoInt {
+ ops.error("intcblock following int")
+ }
+ ops.intcRefs = nil
+ ops.intc = ivals
+ ops.cntIntcBlock++
+ }
+
return nil
}
@@ -763,8 +837,7 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
// intcblock, but parseBinaryArgs would have
// to return a useful consumed value even in
// the face of errors. Hard.
- ops.error(err)
- return nil
+ return ops.error(err)
}
bvals = append(bvals, val)
rest = rest[consumed:]
@@ -777,9 +850,16 @@ func asmByteCBlock(ops *OpStream, spec *OpSpec, args []string) error {
ops.pending.Write(scratch[:l])
ops.pending.Write(bv)
}
- ops.bytecRefs = nil
- ops.bytec = bvals
- ops.hasBytecBlock = true
+ if !ops.known.deadcode {
+ // If we previously processed a pseudo `byte`, we thought we could
+ // insert our own bytecblock, but now we see a manual one.
+ if ops.hasPseudoByte {
+ ops.error("bytecblock following byte/addr/method")
+ }
+ ops.bytecRefs = nil
+ ops.bytec = bvals
+ ops.cntBytecBlock++
+ }
return nil
}
@@ -827,7 +907,7 @@ func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
return ops.error("branch operation needs label argument")
}
- ops.referToLabel(ops.pending.Len(), args[0])
+ ops.referToLabel(ops.pending.Len()+1, args[0], ops.pending.Len()+spec.Size)
ops.pending.WriteByte(spec.Opcode)
// zero bytes will get replaced with actual offset in resolveLabels()
ops.pending.WriteByte(0)
@@ -835,6 +915,23 @@ func asmBranch(ops *OpStream, spec *OpSpec, args []string) error {
return nil
}
+func asmSwitch(ops *OpStream, spec *OpSpec, args []string) error {
+ numOffsets := len(args)
+ if numOffsets > math.MaxUint8 {
+ return ops.errorf("%s cannot take more than 255 labels", spec.Name)
+ }
+ ops.pending.WriteByte(spec.Opcode)
+ ops.pending.WriteByte(byte(numOffsets))
+ opEndPos := ops.pending.Len() + 2*numOffsets
+ for _, arg := range args {
+ ops.referToLabel(ops.pending.Len(), arg, opEndPos)
+ // zero bytes will get replaced with actual offset in resolveLabels()
+ ops.pending.WriteByte(0)
+ ops.pending.WriteByte(0)
+ }
+ return nil
+}
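The offsetPosition recorded by referToLabel is what resolveLabels subtracts from the label's pc, so a plain branch and every arm of a switch can share one resolution path. A sketch of that arithmetic under the existing structure (field and variable names assumed):

```go
// naturalPc: the label's position; lr.position: where the two placeholder
// bytes live; lr.offsetPosition: first byte after the referring opcode.
offset := naturalPc - lr.offsetPosition // negative for backjumps
if offset > math.MaxInt16 || offset < math.MinInt16 {
	// would be reported as a "branch too far" style assembly error
}
binary.BigEndian.PutUint16(raw[lr.position:], uint16(int16(offset)))
```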
+
func asmSubstring(ops *OpStream, spec *OpSpec, args []string) error {
err := asmDefault(ops, spec, args)
if err != nil {
@@ -1399,25 +1496,29 @@ func typecheck(expected, got StackType) bool {
return expected == got
}
-var spaces = [256]uint8{'\t': 1, ' ': 1}
+// newline not included since handled in scanner
+var tokenSeparators = [256]bool{'\t': true, ' ': true, ';': true}
-func fieldsFromLine(line string) []string {
- var fields []string
+func tokensFromLine(line string) []string {
+ var tokens []string
i := 0
- for i < len(line) && spaces[line[i]] != 0 {
+ for i < len(line) && tokenSeparators[line[i]] {
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
i++
}
start := i
- inString := false
- inBase64 := false
+ inString := false // tracked to allow spaces and comments inside
+ inBase64 := false // tracked to allow '//' inside
for i < len(line) {
- if spaces[line[i]] == 0 { // if not space
+ if !tokenSeparators[line[i]] { // if not space
switch line[i] {
case '"': // is a string literal?
if !inString {
- if i == 0 || i > 0 && spaces[line[i-1]] != 0 {
+ if i == 0 || i > 0 && tokenSeparators[line[i-1]] {
inString = true
}
} else {
@@ -1428,9 +1529,9 @@ func fieldsFromLine(line string) []string {
case '/': // is a comment?
if i < len(line)-1 && line[i+1] == '/' && !inBase64 && !inString {
if start != i { // if a comment without whitespace
- fields = append(fields, line[start:i])
+ tokens = append(tokens, line[start:i])
}
- return fields
+ return tokens
}
case '(': // is base64( seq?
prefix := line[start:i]
@@ -1446,19 +1547,29 @@ func fieldsFromLine(line string) []string {
i++
continue
}
+
+		// we've hit a separator; end the last token unless inString
+
if !inString {
- field := line[start:i]
- fields = append(fields, field)
- if field == "base64" || field == "b64" {
- inBase64 = true
- } else if inBase64 {
+ token := line[start:i]
+ tokens = append(tokens, token)
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
+ if inBase64 {
inBase64 = false
+ } else if token == "base64" || token == "b64" {
+ inBase64 = true
}
}
i++
+ // gobble up consecutive whitespace (but notice semis)
if !inString {
- for i < len(line) && spaces[line[i]] != 0 {
+ for i < len(line) && tokenSeparators[line[i]] {
+ if line[i] == ';' {
+ tokens = append(tokens, ";")
+ }
i++
}
start = i
@@ -1467,10 +1578,10 @@ func fieldsFromLine(line string) []string {
// add rest of the string if any
if start < len(line) {
- fields = append(fields, line[start:i])
+ tokens = append(tokens, line[start:i])
}
- return fields
+ return tokens
}
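A sketch of what the retooled tokenizer yields: semicolons become standalone tokens, while quoted strings and comments are honored as before:

```go
fmt.Println(tokensFromLine(`int 1; dup; // trailing comment`))
// [int 1 ; dup ;]

fmt.Println(tokensFromLine(`byte "a;b"; int 1`))
// [byte "a;b" ; int 1]   (the semicolon inside the string is not a separator)
```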
func (ops *OpStream) trace(format string, args ...interface{}) {
@@ -1531,6 +1642,16 @@ func (ops *OpStream) trackStack(args StackTypes, returns StackTypes, instruction
}
}
+// splitTokens breaks tokens into two slices at the first semicolon.
+func splitTokens(tokens []string) (current, rest []string) {
+ for i, token := range tokens {
+ if token == ";" {
+ return tokens[:i], tokens[i+1:]
+ }
+ }
+ return tokens, nil
+}
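And how the new assemble loop below consumes those tokens, statement by statement:

```go
current, rest := splitTokens([]string{"int", "1", ";", "dup", ";", "+"})
// current = [int 1], rest = [dup ; +]
current, rest = splitTokens(rest)
// current = [dup], rest = [+]
current, rest = splitTokens(rest)
// current = [+], rest = nil; the loop ends after this statement
```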
+
// assemble reads text from an input and accumulates the program
func (ops *OpStream) assemble(text string) error {
fin := strings.NewReader(text)
@@ -1541,74 +1662,82 @@ func (ops *OpStream) assemble(text string) error {
for scanner.Scan() {
ops.sourceLine++
line := scanner.Text()
- line = strings.TrimSpace(line)
- if len(line) == 0 {
- ops.trace("%3d: 0 line\n", ops.sourceLine)
- continue
- }
- if strings.HasPrefix(line, "//") {
- ops.trace("%3d: // line\n", ops.sourceLine)
- continue
- }
- if strings.HasPrefix(line, "#pragma") {
- ops.trace("%3d: #pragma line\n", ops.sourceLine)
- ops.pragma(line)
- continue
- }
- fields := fieldsFromLine(line)
- if len(fields) == 0 {
- ops.trace("%3d: no fields\n", ops.sourceLine)
- continue
- }
- // we're about to begin processing opcodes, so settle the Version
- if ops.Version == assemblerNoVersion {
- ops.Version = AssemblerDefaultVersion
- }
- if ops.versionedPseudoOps == nil {
- ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
- }
- opstring := fields[0]
- if opstring[len(opstring)-1] == ':' {
- ops.createLabel(opstring[:len(opstring)-1])
- fields = fields[1:]
- if len(fields) == 0 {
- ops.trace("%3d: label only\n", ops.sourceLine)
+ tokens := tokensFromLine(line)
+ if len(tokens) > 0 {
+ if first := tokens[0]; first[0] == '#' {
+ directive := first[1:]
+ switch directive {
+ case "pragma":
+ ops.pragma(tokens) //nolint:errcheck // report bad pragma line error, but continue assembling
+ ops.trace("%3d: #pragma line\n", ops.sourceLine)
+ default:
+ ops.errorf("Unknown directive: %s", directive)
+ }
continue
}
- opstring = fields[0]
}
- spec, expandedName, ok := getSpec(ops, opstring, fields[1:])
- if ok {
- ops.trace("%3d: %s\t", ops.sourceLine, opstring)
- ops.recordSourceLine()
- if spec.Modes == modeApp {
- ops.HasStatefulOps = true
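+ // process the line as a sequence of semicolon-separated statements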
+ for current, next := splitTokens(tokens); len(current) > 0 || len(next) > 0; current, next = splitTokens(next) {
+ if len(current) == 0 {
+ continue
}
- args, returns := spec.Arg.Types, spec.Return.Types
- if spec.refine != nil {
- nargs, nreturns := spec.refine(&ops.known, fields[1:])
- if nargs != nil {
- args = nargs
- }
- if nreturns != nil {
- returns = nreturns
- }
+ // we're about to begin processing opcodes, so settle the Version
+ if ops.Version == assemblerNoVersion {
+ ops.Version = AssemblerDefaultVersion
}
- ops.trackStack(args, returns, append([]string{expandedName}, fields[1:]...))
- spec.asm(ops, &spec, fields[1:])
- if spec.deadens() { // An unconditional branch deadens the following code
- ops.known.deaden()
+ if ops.versionedPseudoOps == nil {
+ ops.versionedPseudoOps = prepareVersionedPseudoTable(ops.Version)
}
- if spec.Name == "callsub" {
- // since retsub comes back to the callsub, it is an entry point like a label
- ops.known.label()
+ opstring := current[0]
+ if opstring[len(opstring)-1] == ':' {
+ ops.createLabel(opstring[:len(opstring)-1])
+ current = current[1:]
+ if len(current) == 0 {
+ ops.trace("%3d: label only\n", ops.sourceLine)
+ continue
+ }
+ opstring = current[0]
+ }
+ spec, expandedName, ok := getSpec(ops, opstring, current[1:])
+ if ok {
+ ops.trace("%3d: %s\t", ops.sourceLine, opstring)
+ ops.recordSourceLine()
+ if spec.Modes == modeApp {
+ ops.HasStatefulOps = true
+ }
+ args, returns := spec.Arg.Types, spec.Return.Types
+ if spec.refine != nil {
+ nargs, nreturns := spec.refine(&ops.known, current[1:])
+ if nargs != nil {
+ args = nargs
+ }
+ if nreturns != nil {
+ returns = nreturns
+ }
+ }
+ ops.trackStack(args, returns, append([]string{expandedName}, current[1:]...))
+ spec.asm(ops, &spec, current[1:]) //nolint:errcheck // ignore error and continue, to collect more errors
+
+ if spec.deadens() { // An unconditional branch deadens the following code
+ ops.known.deaden()
+ }
+ if spec.Name == "callsub" {
+ // since retsub comes back to the callsub, it is an entry point like a label
+ ops.known.label()
+ }
}
ops.trace("\n")
continue
}
}
- // backward compatibility: do not allow jumps behind last instruction in v1
+ if err := scanner.Err(); err != nil {
+ if errors.Is(err, bufio.ErrTooLong) {
+ err = errors.New("line too long")
+ }
+ ops.error(err)
+ }
+
+ // backward compatibility: do not allow jumps past last instruction in v1
if ops.Version <= 1 {
for label, dest := range ops.labels {
if dest == ops.pending.Len() {
@@ -1635,21 +1764,20 @@ func (ops *OpStream) assemble(text string) error {
return nil
}
-func (ops *OpStream) pragma(line string) error {
- fields := strings.Split(line, " ")
- if fields[0] != "#pragma" {
- return ops.errorf("invalid syntax: %s", fields[0])
+func (ops *OpStream) pragma(tokens []string) error {
+ if tokens[0] != "#pragma" {
+ return ops.errorf("invalid syntax: %s", tokens[0])
}
- if len(fields) < 2 {
+ if len(tokens) < 2 {
return ops.error("empty pragma")
}
- key := fields[1]
+ key := tokens[1]
switch key {
case "version":
- if len(fields) < 3 {
+ if len(tokens) < 3 {
return ops.error("no version value")
}
- value := fields[2]
+ value := tokens[2]
var ver uint64
if ops.pending.Len() > 0 {
return ops.error("#pragma version is only allowed before instructions")
@@ -1674,10 +1802,10 @@ func (ops *OpStream) pragma(line string) error {
}
return nil
case "typetrack":
- if len(fields) < 3 {
+ if len(tokens) < 3 {
return ops.error("no typetrack value")
}
- value := fields[2]
+ value := tokens[2]
on, err := strconv.ParseBool(value)
if err != nil {
return ops.errorf("bad #pragma typetrack: %#v", value)
@@ -1708,19 +1836,20 @@ func (ops *OpStream) resolveLabels() {
reported[lr.label] = true
continue
}
- // all branch instructions (currently) are opcode byte and 2 offset bytes, and the destination is relative to the next pc as if the branch was a no-op
- naturalPc := lr.position + 3
- if ops.Version < backBranchEnabledVersion && dest < naturalPc {
+
+ // All branch targets are encoded as 2 offset bytes. The destination is relative to the end of the
+ // instruction they appear in, which is available in lr.offsetPosition.
+ if ops.Version < backBranchEnabledVersion && dest < lr.offsetPosition {
ops.errorf("label %#v is a back reference, back jump support was introduced in v4", lr.label)
continue
}
- jump := dest - naturalPc
+ jump := dest - lr.offsetPosition
if jump > 0x7fff {
ops.errorf("label %#v is too far away", lr.label)
continue
}
- raw[lr.position+1] = uint8(jump >> 8)
- raw[lr.position+2] = uint8(jump & 0x0ff)
+ raw[lr.position] = uint8(jump >> 8)
+ raw[lr.position+1] = uint8(jump & 0x0ff)
}
ops.pending = *bytes.NewBuffer(raw)
ops.sourceLine = saved
@@ -1769,7 +1898,7 @@ func replaceBytes(s []byte, index, originalLen int, newBytes []byte) []byte {
// This function only optimizes constants introduced by the int pseudo-op, not
// preexisting intcblocks in the code.
func (ops *OpStream) optimizeIntcBlock() error {
- if ops.hasIntcBlock {
+ if ops.cntIntcBlock > 0 {
// don't optimize an existing intcblock, only int pseudo-ops
return nil
}
@@ -1812,7 +1941,7 @@ func (ops *OpStream) optimizeIntcBlock() error {
// This function only optimizes constants introduced by the byte or addr
// pseudo-ops, not preexisting bytecblocks in the code.
func (ops *OpStream) optimizeBytecBlock() error {
- if ops.hasBytecBlock {
+ if ops.cntBytecBlock > 0 {
// don't optimize an existing bytecblock, only byte/addr pseudo-ops
return nil
}
@@ -1954,6 +2083,7 @@ func (ops *OpStream) optimizeConstants(refs []constReference, constBlock []inter
for i := range ops.labelReferences {
if ops.labelReferences[i].position > position {
ops.labelReferences[i].position += positionDelta
+ ops.labelReferences[i].offsetPosition += positionDelta
}
}
@@ -1987,8 +2117,8 @@ func (ops *OpStream) prependCBlocks() []byte {
prebytes := bytes.Buffer{}
vlen := binary.PutUvarint(scratch[:], ops.Version)
prebytes.Write(scratch[:vlen])
- if len(ops.intc) > 0 && !ops.hasIntcBlock {
- prebytes.WriteByte(0x20) // intcblock
+ if len(ops.intc) > 0 && ops.cntIntcBlock == 0 {
+ prebytes.WriteByte(OpsByName[ops.Version]["intcblock"].Opcode)
vlen := binary.PutUvarint(scratch[:], uint64(len(ops.intc)))
prebytes.Write(scratch[:vlen])
for _, iv := range ops.intc {
@@ -1996,8 +2126,8 @@ func (ops *OpStream) prependCBlocks() []byte {
prebytes.Write(scratch[:vlen])
}
}
- if len(ops.bytec) > 0 && !ops.hasBytecBlock {
- prebytes.WriteByte(0x26) // bytecblock
+ if len(ops.bytec) > 0 && ops.cntBytecBlock == 0 {
+ prebytes.WriteByte(OpsByName[ops.Version]["bytecblock"].Opcode)
vlen := binary.PutUvarint(scratch[:], uint64(len(ops.bytec)))
prebytes.Write(scratch[:vlen])
for _, bv := range ops.bytec {
@@ -2190,11 +2320,8 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
pc++
case immLabel:
- offset := (uint(dis.program[pc]) << 8) | uint(dis.program[pc+1])
- target := int(offset) + pc + 2
- if target > 0xffff {
- target -= 0x10000
- }
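+ // decodeBranchOffset handles the int16 sign extension; the target is relative to the pc just past the two offset bytes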
+ offset := decodeBranchOffset(dis.program, pc)
+ target := offset + pc + 2
var label string
if dis.numericTargets {
label = fmt.Sprintf("%d", target)
@@ -2235,7 +2362,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
return "", err
}
- dis.intc = append(dis.intc, intc...)
+ dis.intc = intc
for i, iv := range intc {
if i != 0 {
out += " "
@@ -2248,7 +2375,7 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
if err != nil {
return "", err
}
- dis.bytec = append(dis.bytec, bytec...)
+ dis.bytec = bytec
for i, bv := range bytec {
if i != 0 {
out += " "
@@ -2256,6 +2383,30 @@ func disassemble(dis *disassembleState, spec *OpSpec) (string, error) {
out += fmt.Sprintf("0x%s", hex.EncodeToString(bv))
}
pc = nextpc
+ case immLabels:
+ targets, nextpc, err := parseSwitch(dis.program, pc)
+ if err != nil {
+ return "", err
+ }
+
+ var labels []string
+ for _, target := range targets {
+ var label string
+ if dis.numericTargets {
+ label = fmt.Sprintf("%d", target)
+ } else {
+ if known, ok := dis.pendingLabels[target]; ok {
+ label = known
+ } else {
+ dis.labelCount++
+ label = fmt.Sprintf("label%d", dis.labelCount)
+ dis.putLabel(label, target)
+ }
+ }
+ labels = append(labels, label)
+ }
+ out += strings.Join(labels, " ")
+ pc = nextpc
default:
return "", fmt.Errorf("unknown immKind %d", imm.kind)
}
@@ -2409,6 +2560,20 @@ func checkByteConstBlock(cx *EvalContext) error {
return nil
}
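+// parseSwitch reads a switch instruction's immediates beginning at the label
+// count byte: it returns the absolute pc target of each label and the pc of
+// the next instruction.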
+func parseSwitch(program []byte, pos int) (targets []int, nextpc int, err error) {
+ numOffsets := int(program[pos])
+ pos++
+ end := pos + 2*numOffsets // end of op: offset is applied to this position
+ for i := 0; i < numOffsets; i++ {
+ offset := decodeBranchOffset(program, pos)
+ target := end + offset
+ targets = append(targets, target)
+ pos += 2
+ }
+ nextpc = pos
+ return
+}
+
func allPrintableASCII(bytes []byte) bool {
for _, b := range bytes {
if b < 32 || b > 126 {
diff --git a/data/transactions/logic/assembler_test.go b/data/transactions/logic/assembler_test.go
index 24d9dffd07..e614a722c6 100644
--- a/data/transactions/logic/assembler_test.go
+++ b/data/transactions/logic/assembler_test.go
@@ -17,6 +17,7 @@
package logic
import (
+ "bytes"
"encoding/hex"
"fmt"
"strings"
@@ -393,7 +394,17 @@ pushint 1
replace3
`
-const v8Nonsense = v7Nonsense + pairingNonsense
+const switchNonsense = `
+switch_label0:
+pushint 1
+switch switch_label0 switch_label1
+switch_label1:
+pushint 1
+`
+
+const v8Nonsense = v7Nonsense + switchNonsense
+
+const v9Nonsense = v8Nonsense + pairingNonsense
const v6Compiled = "2004010002b7a60c26050242420c68656c6c6f20776f726c6421070123456789abcd208dae2087fbba51304eb02b91f656948397a7946390e8cb70fc9ea4d95f92251d047465737400320032013202320380021234292929292b0431003101310231043105310731083109310a310b310c310d310e310f3111311231133114311533000033000133000233000433000533000733000833000933000a33000b33000c33000d33000e33000f3300113300123300133300143300152d2e01022581f8acd19181cf959a1281f8acd19181cf951a81f8acd19181cf1581f8acd191810f082209240a220b230c240d250e230f23102311231223132314181b1c28171615400003290349483403350222231d4a484848482b50512a632223524100034200004322602261222704634848222862482864286548482228246628226723286828692322700048482371004848361c0037001a0031183119311b311d311e311f312023221e312131223123312431253126312731283129312a312b312c312d312e312f447825225314225427042455220824564c4d4b0222382124391c0081e80780046a6f686e2281d00f23241f880003420001892224902291922494249593a0a1a2a3a4a5a6a7a8a9aaabacadae24af3a00003b003c003d816472064e014f012a57000823810858235b235a2359b03139330039b1b200b322c01a23c1001a2323c21a23c3233e233f8120af06002a494905002a49490700b53a03b6b7043cb8033a0c2349c42a9631007300810881088120978101c53a8101c6003a"
@@ -402,7 +413,11 @@ const randomnessCompiled = "81ffff03d101d000"
const v7Compiled = v6Compiled + "5e005f018120af060180070123456789abcd49490501988003012345494984" +
randomnessCompiled + "800243218001775c0280018881015d"
-const v8Compiled = v7Compiled + pairingCompiled
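+// switchCompiled decodes as: 8101 pushint 1; 8a switch; 02 label count; then
+// big-endian offsets fff8 (back to switch_label0) and 0000 (ahead to
+// switch_label1); 8101 pushint 1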
+const switchCompiled = "81018a02fff800008101"
+
+const v8Compiled = v7Compiled + switchCompiled
+
+const v9Compiled = v7Compiled + pairingCompiled
var nonsense = map[uint64]string{
1: v1Nonsense,
@@ -413,6 +428,7 @@ var nonsense = map[uint64]string{
6: v6Nonsense,
7: v7Nonsense,
8: v8Nonsense,
+ 9: v9Nonsense,
}
var compiled = map[uint64]string{
@@ -479,6 +495,9 @@ var experiments = []uint64{pairingVersion}
// intended to release the opcodes, they should have been removed from
// `experiments`.
func TestExperimental(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
futureV := config.Consensus[protocol.ConsensusFuture].LogicSigVersion
for _, v := range experiments {
// Allows less, so we can push something out, even before vFuture has been updated.
@@ -506,16 +525,21 @@ type Expect struct {
s string
}
-func testMatch(t testing.TB, actual, expected string) bool {
+func testMatch(t testing.TB, actual, expected string) (ok bool) {
+ defer func() {
+ if !ok {
+ t.Logf("'%s' does not match '%s'", actual, expected)
+ }
+ }()
t.Helper()
if strings.HasPrefix(expected, "...") && strings.HasSuffix(expected, "...") {
- return assert.Contains(t, actual, expected[3:len(expected)-3])
+ return strings.Contains(actual, expected[3:len(expected)-3])
} else if strings.HasPrefix(expected, "...") {
- return assert.Contains(t, actual+"^", expected[3:]+"^")
+ return strings.Contains(actual+"^", expected[3:]+"^")
} else if strings.HasSuffix(expected, "...") {
- return assert.Contains(t, "^"+actual, "^"+expected[:len(expected)-3])
+ return strings.Contains("^"+actual, "^"+expected[:len(expected)-3])
} else {
- return assert.Equal(t, expected, actual)
+ return expected == actual
}
}
@@ -552,8 +576,7 @@ func summarize(trace *strings.Builder) string {
func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpStream {
t.Helper()
- program := strings.ReplaceAll(source, ";", "\n")
- ops, err := assembleWithTrace(program, ver)
+ ops, err := assembleWithTrace(source, ver)
if len(expected) == 0 {
if len(ops.Errors) > 0 || err != nil || ops == nil || ops.Program == nil {
t.Log(summarize(ops.Trace))
@@ -567,13 +590,13 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
require.NotNil(t, ops.Program)
// It should always be possible to Disassemble
dis, err := Disassemble(ops.Program)
- require.NoError(t, err, program)
+ require.NoError(t, err, source)
// And, while the disassembly may not match input
// exactly, the assembly of the disassembly should
// give the same bytecode
ops2, err := AssembleStringWithVersion(notrack(dis), ver)
if len(ops2.Errors) > 0 || err != nil || ops2 == nil || ops2.Program == nil {
- t.Log(program)
+ t.Log(source)
t.Log(dis)
}
require.Empty(t, ops2.Errors)
@@ -581,19 +604,19 @@ func testProg(t testing.TB, source string, ver uint64, expected ...Expect) *OpSt
require.Equal(t, ops.Program, ops2.Program)
} else {
if err == nil {
- t.Log(program)
+ t.Log(source)
}
require.Error(t, err)
errors := ops.Errors
for _, exp := range expected {
if exp.l == 0 {
- // line 0 means: "must match all"
+ // line 0 means: "must match some line"
require.Len(t, expected, 1)
- fail := false
+ fail := true
for _, err := range errors {
msg := err.Unwrap().Error()
- if !testMatch(t, msg, exp.s) {
- fail = true
+ if testMatch(t, msg, exp.s) {
+ fail = false
}
}
if fail {
@@ -701,9 +724,9 @@ func TestAssembleGlobal(t *testing.T) {
testProg(t, "global MinTxnFee; int 2; +", AssemblerMaxVersion)
testProg(t, "global ZeroAddress; byte 0x12; concat; len", AssemblerMaxVersion)
testProg(t, "global MinTxnFee; byte 0x12; concat", AssemblerMaxVersion,
- Expect{3, "concat arg 0 wanted type []byte..."})
+ Expect{1, "concat arg 0 wanted type []byte..."})
testProg(t, "int 2; global ZeroAddress; +", AssemblerMaxVersion,
- Expect{3, "+ arg 1 wanted type uint64..."})
+ Expect{1, "+ arg 1 wanted type uint64..."})
}
func TestAssembleDefault(t *testing.T) {
@@ -730,7 +753,7 @@ func TestOpUint(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
ops := newOpStream(v)
- ops.Uint(0xcafebabe)
+ ops.IntLiteral(0xcafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
@@ -746,9 +769,8 @@ func TestOpUint64(t *testing.T) {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- t.Parallel()
ops := newOpStream(v)
- ops.Uint(0xcafebabecafebabe)
+ ops.IntLiteral(0xcafebabecafebabe)
prog := ops.prependCBlocks()
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
@@ -769,6 +791,7 @@ func TestOpBytes(t *testing.T) {
require.NotNil(t, prog)
s := hex.EncodeToString(prog)
require.Equal(t, mutateProgVersion(v, "0126010661626364656628"), s)
+ testProg(t, "byte 0x7; len", v, Expect{1, "...odd length hex string"})
})
}
}
@@ -876,6 +899,125 @@ func TestAssembleBytesString(t *testing.T) {
}
}
+func TestManualCBlocks(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Despite appearing twice, 500s are pushints because of manual intcblock
+ ops := testProg(t, "intcblock 1; int 500; int 500; ==", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[4], OpsByName[ops.Version]["pushint"].Opcode)
+
+ ops = testProg(t, "intcblock 2 3; intcblock 4 10; int 5", AssemblerMaxVersion)
+ text, err := Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, text, "pushint 5")
+
+ ops = testProg(t, "intcblock 2 3; intcblock 4 10; intc_3", AssemblerMaxVersion)
+ text, err = Disassemble(ops.Program)
+ require.NoError(t, err)
+ require.Contains(t, text, "intc_3\n") // That is, no commented value for intc_3 is shown
+
+ // In old straight-line versions, allow mixing int and intc if the ints all
+ // reference the manual block. Since conditionals do make it possible that
+ // different cblocks could be in effect depending on earlier path choices,
+ // maybe we should not even allow this.
+ checkSame(t, 3,
+ "intcblock 4 5 1; intc_0; intc_2; +; intc_1; ==",
+ "intcblock 4 5 1; int 4; int 1; +; intc_1; ==",
+ "intcblock 4 5 1; intc_0; int 1; +; int 5; ==")
+ checkSame(t, 3,
+ "bytecblock 0x44 0x55 0x4455; bytec_0; bytec_1; concat; bytec_2; ==",
+ "bytecblock 0x44 0x55 0x4455; byte 0x44; bytec_1; concat; byte 0x4455; ==",
+ "bytecblock 0x44 0x55 0x4455; bytec_0; byte 0x55; concat; bytec_2; ==")
+
+ // But complain if they do not
+ testProg(t, "intcblock 4; int 3;", 3, Expect{1, "int 3 used without 3 in intcblock"})
+ testProg(t, "bytecblock 0x44; byte 0x33;", 3, Expect{1, "byte/addr/method used without value in bytecblock"})
+
+ // Or if the ref comes before the constant block, even if they match
+ testProg(t, "int 5; intcblock 4;", 3, Expect{1, "intcblock following int"})
+ testProg(t, "int 4; intcblock 4;", 3, Expect{1, "intcblock following int"})
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 3, Expect{1, "bytecblock following byte/addr/method"})
+
+ // But we can't complain precisely once backjumps are allowed, so we force
+ // compile to push*. (We don't analyze the CFG, so we don't know if we can
+ // use what is in the user-defined block. Perhaps we could special-case
+ // single cblocks at the start of the program.)
+ checkSame(t, 4,
+ "intcblock 4 5 1; int 4; int 1; +; int 5; ==",
+ "intcblock 4 5 1; pushint 4; pushint 1; +; pushint 5; ==")
+ checkSame(t, 4,
+ "bytecblock 0x44 0x55 0x4455; byte 0x44; byte 0x55; concat; byte 0x4455; ==",
+ "bytecblock 0x44 0x55 0x4455; pushbytes 0x44; pushbytes 0x55; concat; pushbytes 0x4455; ==")
+ // Can't switch to push* after the fact.
+ testProg(t, "int 5; intcblock 4;", 4, Expect{1, "intcblock following int"})
+ testProg(t, "int 4; intcblock 4;", 4, Expect{1, "intcblock following int"})
+ testProg(t, "addr RWXCBB73XJITATVQFOI7MVUUQOL2PFDDSDUMW4H4T2SNSX4SEUOQ2MM7F4; bytecblock 0x44", 4, Expect{1, "bytecblock following byte/addr/method"})
+
+ // Ignore manually added cblocks in deadcode, so they can be added easily to
+ // existing programs. There are proposals to put metadata there.
+ ops = testProg(t, "int 4; int 4; +; int 8; ==; return; intcblock 10", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["intcblock"].Opcode)
+ require.EqualValues(t, ops.Program[3], 4) // 1 4
+ require.Equal(t, ops.Program[4], OpsByName[ops.Version]["intc_0"].Opcode)
+ ops = testProg(t, "b skip; intcblock 10; skip: int 4; int 4; +; int 8; ==;", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["intcblock"].Opcode)
+ require.EqualValues(t, ops.Program[3], 4)
+
+ ops = testProg(t, "byte 0x44; byte 0x44; concat; len; return; bytecblock 0x11", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["bytecblock"].Opcode)
+ require.EqualValues(t, ops.Program[4], 0x44) // 1 1 0x44
+ require.Equal(t, ops.Program[5], OpsByName[ops.Version]["bytec_0"].Opcode)
+ ops = testProg(t, "b skip; bytecblock 0x11; skip: byte 0x44; byte 0x44; concat; len; int 4; ==", AssemblerMaxVersion)
+ require.Equal(t, ops.Program[1], OpsByName[ops.Version]["bytecblock"].Opcode)
+ require.EqualValues(t, ops.Program[4], 0x44)
+}
+
+func TestManualCBlocksPreBackBranch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // Before backbranch enabled, the assembler is willing to assemble an `int`
+ // reference after an intcblock as an intc. It uses the most recent seen
+ // non-deadcode intcblock, so it *could* be wrong.
+ testProg(t, "intcblock 10 20; int 10;", backBranchEnabledVersion-1)
+ // By the same token, assembly complains if that intcblock doesn't have the
+ // constant. In v3, and v3 only, it *could* pushint.
+ testProg(t, "intcblock 10 20; int 30;", backBranchEnabledVersion-1, Expect{1, "int 30 used..."})
+
+ // Since the second intcblock is dead, the `int 10` "sees" the first block, not the second
+ testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-1)
+ testProg(t, "intcblock 10 20; b skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-1,
+ Expect{1, "int 3 used..."})
+
+ // Here, the intcblock in effect is unknowable, so it's better to force the user to
+ // use intc (unless pushint is available to save the day).
+
+ // backBranchEnabledVersion-1 contains pushint
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-1)
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-1)
+
+ // backBranchEnabledVersion-2 does not
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 10;", backBranchEnabledVersion-2,
+ Expect{1, "int 10 used with manual intcblocks. Use intc."})
+ testProg(t, "intcblock 10 20; txn NumAppArgs; bz skip; intcblock 3 4 5; skip: int 3;", backBranchEnabledVersion-2,
+ Expect{1, "int 3 used with manual intcblocks. Use intc."})
+
+ // REPEAT ABOVE, BUT FOR BYTE BLOCKS
+
+ testProg(t, "bytecblock 0x10 0x20; byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; byte 0x30;", backBranchEnabledVersion-1, Expect{1, "byte/addr/method used..."})
+ testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; b skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1,
+ Expect{1, "byte/addr/method used..."})
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-1)
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x10;", backBranchEnabledVersion-2,
+ Expect{1, "byte 0x10 used with manual bytecblocks. Use bytec."})
+ testProg(t, "bytecblock 0x10 0x20; txn NumAppArgs; bz skip; bytecblock 0x03 0x04 0x05; skip: byte 0x03;", backBranchEnabledVersion-2,
+ Expect{1, "byte 0x03 used with manual bytecblocks. Use bytec."})
+}
+
func TestAssembleOptimizedConstants(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
@@ -1111,209 +1253,91 @@ func TestFieldsFromLine(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- line := "op arg"
- fields := fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "arg", fields[1])
-
- line = "op arg // test"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "arg", fields[1])
-
- line = "op base64 ABC//=="
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op base64 ABC/=="
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
-
- line = "op base64 ABC/== /"
- fields = fieldsFromLine(line)
- require.Equal(t, 4, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
- require.Equal(t, "/", fields[3])
-
- line = "op base64 ABC/== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC/==", fields[2])
-
- line = "op base64 ABC//== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op b64 ABC//== //"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64", fields[1])
- require.Equal(t, "ABC//==", fields[2])
-
- line = "op b64(ABC//==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64(ABC//==)", fields[1])
-
- line = "op base64(ABC//==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64(ABC//==)", fields[1])
-
- line = "op b64(ABC/==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "b64(ABC/==)", fields[1])
-
- line = "op base64(ABC/==) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64(ABC/==)", fields[1])
-
- line = "base64(ABC//==)"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "base64(ABC//==)", fields[0])
-
- line = "b(ABC//==)"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "b(ABC", fields[0])
-
- line = "b(ABC//==) //"
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, "b(ABC", fields[0])
-
- line = "b(ABC ==) //"
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "b(ABC", fields[0])
- require.Equal(t, "==)", fields[1])
-
- line = "op base64 ABC)"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC)", fields[2])
-
- line = "op base64 ABC) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC)", fields[2])
-
- line = "op base64 ABC//) // comment"
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, "base64", fields[1])
- require.Equal(t, "ABC//)", fields[2])
-
- line = `op "test"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test"`, fields[1])
-
- line = `op "test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2 // not a comment"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2 // not a comment" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2 // not a comment" // comment`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2 // not a comment"`, fields[1])
-
- line = `op "test1 test2" //`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2"//`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2"`, fields[1])
-
- line = `op "test1 test2` // non-terminated string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2`, fields[1])
-
- line = `op "test1 test2\"` // non-terminated string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `"test1 test2\"`, fields[1])
-
- line = `op \"test1 test2\"` // not a string literal
- fields = fieldsFromLine(line)
- require.Equal(t, 3, len(fields))
- require.Equal(t, "op", fields[0])
- require.Equal(t, `\"test1`, fields[1])
- require.Equal(t, `test2\"`, fields[2])
-
- line = `"test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, `"test1 test2"`, fields[0])
-
- line = `\"test1 test2"`
- fields = fieldsFromLine(line)
- require.Equal(t, 2, len(fields))
- require.Equal(t, `\"test1`, fields[0])
- require.Equal(t, `test2"`, fields[1])
-
- line = `"" // test`
- fields = fieldsFromLine(line)
- require.Equal(t, 1, len(fields))
- require.Equal(t, `""`, fields[0])
+ check := func(line string, tokens ...string) {
+ t.Helper()
+ assert.Equal(t, tokens, tokensFromLine(line)) // expected value first, per testify's signature
+ }
+
+ check("op arg", "op", "arg")
+ check("op arg // test", "op", "arg")
+ check("op base64 ABC//==", "op", "base64", "ABC//==")
+ check("op base64 base64", "op", "base64", "base64")
+ check("op base64 base64 //comment", "op", "base64", "base64")
+ check("op base64 base64; op2 //done", "op", "base64", "base64", ";", "op2")
+ check("op base64 ABC/==", "op", "base64", "ABC/==")
+ check("op base64 ABC/== /", "op", "base64", "ABC/==", "/")
+ check("op base64 ABC/== //", "op", "base64", "ABC/==")
+ check("op base64 ABC//== //", "op", "base64", "ABC//==")
+ check("op b64 ABC//== //", "op", "b64", "ABC//==")
+ check("op b64(ABC//==) // comment", "op", "b64(ABC//==)")
+ check("op base64(ABC//==) // comment", "op", "base64(ABC//==)")
+ check("op b64(ABC/==) // comment", "op", "b64(ABC/==)")
+ check("op base64(ABC/==) // comment", "op", "base64(ABC/==)")
+ check("base64(ABC//==)", "base64(ABC//==)")
+ check("b(ABC//==)", "b(ABC")
+ check("b(ABC//==) //", "b(ABC")
+ check("b(ABC ==) //", "b(ABC", "==)")
+ check("op base64 ABC)", "op", "base64", "ABC)")
+ check("op base64 ABC) // comment", "op", "base64", "ABC)")
+ check("op base64 ABC//) // comment", "op", "base64", "ABC//)")
+ check(`op "test"`, "op", `"test"`)
+ check(`op "test1 test2"`, "op", `"test1 test2"`)
+ check(`op "test1 test2" // comment`, "op", `"test1 test2"`)
+ check(`op "test1 test2 // not a comment"`, "op", `"test1 test2 // not a comment"`)
+ check(`op "test1 test2 // not a comment" // comment`, "op", `"test1 test2 // not a comment"`)
+ check(`op "test1 test2" //`, "op", `"test1 test2"`)
+ check(`op "test1 test2"//`, "op", `"test1 test2"`)
+ check(`op "test1 test2`, "op", `"test1 test2`) // non-terminated string literal
+ check(`op "test1 test2\"`, "op", `"test1 test2\"`) // non-terminated string literal
+ check(`op \"test1 test2\"`, "op", `\"test1`, `test2\"`) // not a string literal
+ check(`"test1 test2"`, `"test1 test2"`)
+ check(`\"test1 test2"`, `\"test1`, `test2"`)
+ check(`"" // test`, `""`)
+ check("int 1; int 2", "int", "1", ";", "int", "2")
+ check("int 1;;;int 2", "int", "1", ";", ";", ";", "int", "2")
+ check("int 1; ;int 2;; ; ;; ", "int", "1", ";", ";", "int", "2", ";", ";", ";", ";", ";")
+ check(";", ";")
+ check("; ; ;;;;", ";", ";", ";", ";", ";", ";")
+ check(" ;", ";")
+ check(" ; ", ";")
+}
+
+func TestSplitTokens(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ check := func(tokens []string, left []string, right []string) {
+ t.Helper()
+ current, next := splitTokens(tokens)
+ assert.Equal(t, left, current)
+ assert.Equal(t, right, next)
+ }
+
+ check([]string{"hey,", "how's", ";", ";", "it", "going", ";"},
+ []string{"hey,", "how's"},
+ []string{";", "it", "going", ";"},
+ )
+
+ check([]string{";"},
+ []string{},
+ []string{},
+ )
+
+ check([]string{";", "it", "going"},
+ []string{},
+ []string{"it", "going"},
+ )
+
+ check([]string{"hey,", "how's"},
+ []string{"hey,", "how's"},
+ nil,
+ )
+
+ check([]string{`"hey in quotes;"`, "getting", `";"`, ";", "tricky"},
+ []string{`"hey in quotes;"`, "getting", `";"`},
+ []string{"tricky"},
+ )
+
}
func TestAssembleRejectNegJump(t *testing.T) {
@@ -1798,22 +1822,22 @@ func TestAssembleAsset(t *testing.T) {
testProg(t, "asset_holding_get ABC 1", v,
Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; asset_holding_get ABC 1", v,
- Expect{2, "asset_holding_get ABC 1 expects 2 stack arguments..."})
+ Expect{1, "asset_holding_get ABC 1 expects 2 stack arguments..."})
testProg(t, "int 1; int 1; asset_holding_get ABC 1", v,
- Expect{3, "asset_holding_get expects 1 immediate argument"})
+ Expect{1, "asset_holding_get expects 1 immediate argument"})
testProg(t, "int 1; int 1; asset_holding_get ABC", v,
- Expect{3, "asset_holding_get unknown field: \"ABC\""})
+ Expect{1, "asset_holding_get unknown field: \"ABC\""})
testProg(t, "byte 0x1234; asset_params_get ABC 1", v,
- Expect{2, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
+ Expect{1, "asset_params_get ABC 1 arg 0 wanted type uint64..."})
// Test that AssetUnitName is known to return bytes
testProg(t, "int 1; asset_params_get AssetUnitName; pop; int 1; +", v,
- Expect{5, "+ arg 0 wanted type uint64..."})
+ Expect{1, "+ arg 0 wanted type uint64..."})
// Test that AssetTotal is known to return uint64
testProg(t, "int 1; asset_params_get AssetTotal; pop; byte 0x12; concat", v,
- Expect{5, "concat arg 0 wanted type []byte..."})
+ Expect{1, "concat arg 0 wanted type []byte..."})
testLine(t, "asset_params_get ABC 1", v, "asset_params_get expects 1 immediate argument")
testLine(t, "asset_params_get ABC", v, "asset_params_get unknown field: \"ABC\"")
@@ -1955,8 +1979,7 @@ intc_0 // 1
bnz label1
label1:
`, v)
- ops, err := AssembleStringWithVersion(source, v)
- require.NoError(t, err)
+ ops := testProg(t, source, v)
dis, err := Disassemble(ops.Program)
require.NoError(t, err)
require.Equal(t, source, dis)
@@ -2069,8 +2092,7 @@ func TestHasStatefulOps(t *testing.T) {
t.Parallel()
source := "int 1"
- ops, err := AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops := testProg(t, source, AssemblerMaxVersion)
has, err := HasStatefulOps(ops.Program)
require.NoError(t, err)
require.False(t, has)
@@ -2080,8 +2102,7 @@ int 1
app_opted_in
err
`
- ops, err = AssembleStringWithVersion(source, AssemblerMaxVersion)
- require.NoError(t, err)
+ ops = testProg(t, source, AssemblerMaxVersion)
has, err = HasStatefulOps(ops.Program)
require.NoError(t, err)
require.True(t, has)
@@ -2258,46 +2279,38 @@ func TestAssemblePragmaVersion(t *testing.T) {
text := `#pragma version 1
int 1
`
- ops, err := AssembleStringWithVersion(text, 1)
- require.NoError(t, err)
- ops1, err := AssembleStringWithVersion("int 1", 1)
- require.NoError(t, err)
+ ops := testProg(t, text, 1)
+ ops1 := testProg(t, "int 1", 1)
require.Equal(t, ops1.Program, ops.Program)
testProg(t, text, 0, Expect{1, "version mismatch..."})
testProg(t, text, 2, Expect{1, "version mismatch..."})
testProg(t, text, assemblerNoVersion)
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
+ ops = testProg(t, text, assemblerNoVersion)
require.Equal(t, ops1.Program, ops.Program)
text = `#pragma version 2
int 1
`
- ops, err = AssembleStringWithVersion(text, 2)
- require.NoError(t, err)
- ops2, err := AssembleStringWithVersion("int 1", 2)
- require.NoError(t, err)
+ ops = testProg(t, text, 2)
+ ops2 := testProg(t, "int 1", 2)
require.Equal(t, ops2.Program, ops.Program)
testProg(t, text, 0, Expect{1, "version mismatch..."})
testProg(t, text, 1, Expect{1, "version mismatch..."})
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
+ ops = testProg(t, text, assemblerNoVersion)
require.Equal(t, ops2.Program, ops.Program)
// check if no version it defaults to v1
text = `byte "test"
len
`
- ops, err = AssembleStringWithVersion(text, assemblerNoVersion)
- require.NoError(t, err)
- ops1, err = AssembleStringWithVersion(text, 1)
+ ops = testProg(t, text, assemblerNoVersion)
+ ops1 = testProg(t, text, 1)
require.Equal(t, ops1.Program, ops.Program)
- require.NoError(t, err)
- ops2, err = AssembleString(text)
+ ops2, err := AssembleString(text)
require.NoError(t, err)
require.Equal(t, ops2.Program, ops.Program)
@@ -2325,9 +2338,8 @@ func TestErrShortBytecblock(t *testing.T) {
t.Parallel()
text := `intcblock 0x1234567812345678 0x1234567812345671 0x1234567812345672 0x1234567812345673 4 5 6 7 8`
- ops, err := AssembleStringWithVersion(text, 1)
- require.NoError(t, err)
- _, _, err = parseIntcblock(ops.Program, 1)
+ ops := testProg(t, text, 1)
+ _, _, err := parseIntcblock(ops.Program, 1)
require.Equal(t, err, errShortIntcblock)
var cx EvalContext
@@ -2369,8 +2381,7 @@ func TestMethodWarning(t *testing.T) {
for _, test := range tests {
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
src := fmt.Sprintf("method \"%s\"\nint 1", test.method)
- ops, err := AssembleStringWithVersion(src, v)
- require.NoError(t, err)
+ ops := testProg(t, src, v)
if test.pass {
require.Len(t, ops.Warnings, 0)
@@ -2422,29 +2433,29 @@ func TestSwapTypeCheck(t *testing.T) {
t.Parallel()
/* reconfirm that we detect this type error */
- testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{3, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
/* despite swap, we track types */
- testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
- testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, Expect{4, "+ arg 1..."})
+ testProg(t, "int 1; byte 0x1234; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "byte 0x1234; int 1; swap; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
}
func TestDigAsm(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; dig; +", AssemblerMaxVersion, Expect{2, "dig expects 1 immediate..."})
- testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, Expect{2, "dig unable to parse..."})
+ testProg(t, "int 1; dig; +", AssemblerMaxVersion, Expect{1, "dig expects 1 immediate..."})
+ testProg(t, "int 1; dig junk; +", AssemblerMaxVersion, Expect{1, "dig unable to parse..."})
testProg(t, "int 1; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion)
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 2; +", AssemblerMaxVersion,
- Expect{5, "+ arg 1..."})
+ Expect{1, "+ arg 1..."})
testProg(t, "byte 0x32; byte 0x1234; int 2; dig 3; +", AssemblerMaxVersion,
- Expect{4, "dig 3 expects 4..."})
+ Expect{1, "dig 3 expects 4..."})
testProg(t, "int 1; byte 0x1234; int 2; dig 12; +", AssemblerMaxVersion,
- Expect{4, "dig 12 expects 13..."})
+ Expect{1, "dig 12 expects 13..."})
// Confirm that digging something out does not ruin our knowledge about the types in the middle
testProg(t, "int 1; byte 0x1234; byte 0x1234; dig 2; dig 3; +; pop; +", AssemblerMaxVersion,
- Expect{8, "+ arg 1..."})
+ Expect{1, "+ arg 1..."})
testProg(t, "int 3; pushbytes \"123456\"; int 1; dig 2; substring3", AssemblerMaxVersion)
}
@@ -2452,39 +2463,39 @@ func TestDigAsm(t *testing.T) {
func TestEqualsTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, Expect{3, "== arg 0..."})
- testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, Expect{3, "!= arg 0..."})
- testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, Expect{3, "== arg 0..."})
- testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, Expect{3, "!= arg 0..."})
+ testProg(t, "int 1; byte 0x1234; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
+ testProg(t, "int 1; byte 0x1234; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
+ testProg(t, "byte 0x1234; int 1; ==", AssemblerMaxVersion, Expect{1, "== arg 0..."})
+ testProg(t, "byte 0x1234; int 1; !=", AssemblerMaxVersion, Expect{1, "!= arg 0..."})
}
func TestDupTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
+ testProg(t, "byte 0x1234; dup; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
testProg(t, "byte 0x1234; int 1; dup; +", AssemblerMaxVersion)
- testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, Expect{4, "+ arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, Expect{4, "+ arg 1..."})
+ testProg(t, "byte 0x1234; int 1; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; +", AssemblerMaxVersion, Expect{1, "+ arg 1..."})
- testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup; dig 1; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup; dig 1; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
- testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "byte 0x1234; int 1; dup2; dig 2; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "int 1; byte 0x1234; dup2; dig 2; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestSelectTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; select; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "byte 0x1234; byte 0x5678; int 3; select; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestSetBitTypeCheck(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, Expect{5, "len arg 0..."})
- testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{5, "! arg 0..."})
+ testProg(t, "int 1; int 2; int 3; setbit; len", AssemblerMaxVersion, Expect{1, "len arg 0..."})
+ testProg(t, "byte 0x1234; int 2; int 3; setbit; !", AssemblerMaxVersion, Expect{1, "! arg 0..."})
}
func TestScratchTypeCheck(t *testing.T) {
@@ -2493,13 +2504,13 @@ func TestScratchTypeCheck(t *testing.T) {
// All scratch slots should start as uint64
testProg(t, "load 0; int 1; +", AssemblerMaxVersion)
// Check load and store accurately using the scratch space
- testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{5, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
// Loads should know the type it's loading if all the slots are the same type
- testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{3, "btoi arg 0..."})
+ testProg(t, "int 0; loads; btoi", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
// Loads doesn't know the type when slot types vary
testProg(t, "byte 0x01; store 0; int 1; loads; btoi", AssemblerMaxVersion)
// Stores should only set slots to StackAny if they are not the same type as what is being stored
- testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{8, "+ arg 0..."})
+ testProg(t, "byte 0x01; store 0; int 3; byte 0x01; stores; load 0; int 1; +", AssemblerMaxVersion, Expect{1, "+ arg 0..."})
// ScratchSpace should reset after hitting label in deadcode
testProg(t, "byte 0x01; store 0; b label1; label1:; load 0; int 1; +", AssemblerMaxVersion)
// But it should reset to StackAny not uint64
@@ -2507,7 +2518,7 @@ func TestScratchTypeCheck(t *testing.T) {
// Callsubs should also reset the scratch space
testProg(t, "callsub A; load 0; btoi; return; A: byte 0x01; store 0; retsub", AssemblerMaxVersion)
// But the scratchspace should still be tracked after the callsub
- testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{5, "btoi arg 0..."})
+ testProg(t, "callsub A; int 1; store 0; load 0; btoi; return; A: retsub", AssemblerMaxVersion, Expect{1, "btoi arg 0..."})
}
func TestCoverAsm(t *testing.T) {
@@ -2515,9 +2526,9 @@ func TestCoverAsm(t *testing.T) {
t.Parallel()
testProg(t, `int 4; byte "john"; int 5; cover 2; pop; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; cover 1; pop; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; byte "john"; int 5; cover 2; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
- testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{2, "cover unable to parse n ..."})
+ testProg(t, `int 4; cover junk`, AssemblerMaxVersion, Expect{1, "cover unable to parse n ..."})
}
func TestUncoverAsm(t *testing.T) {
@@ -2526,38 +2537,38 @@ func TestUncoverAsm(t *testing.T) {
testProg(t, `int 4; byte "john"; int 5; uncover 2; +`, AssemblerMaxVersion)
testProg(t, `int 4; byte "ayush"; int 5; uncover 1; pop; +`, AssemblerMaxVersion)
testProg(t, `int 1; byte "jj"; byte "ayush"; byte "john"; int 5; uncover 4; +`, AssemblerMaxVersion)
- testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, Expect{5, "+ arg 1..."})
+ testProg(t, `int 4; byte "ayush"; int 5; uncover 1; +`, AssemblerMaxVersion, Expect{1, "+ arg 1..."})
}
func TestTxTypes(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{2, "itxn_field Sender expects 1 stack argument..."})
- testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{3, "...wanted type []byte got uint64"})
+ testProg(t, "itxn_begin; itxn_field Sender", 5, Expect{1, "itxn_field Sender expects 1 stack argument..."})
+ testProg(t, "itxn_begin; int 1; itxn_field Sender", 5, Expect{1, "...wanted type []byte got uint64"})
testProg(t, "itxn_begin; byte 0x56127823; itxn_field Sender", 5)
- testProg(t, "itxn_begin; itxn_field Amount", 5, Expect{2, "itxn_field Amount expects 1 stack argument..."})
- testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{3, "...wanted type uint64 got []byte"})
+ testProg(t, "itxn_begin; itxn_field Amount", 5, Expect{1, "itxn_field Amount expects 1 stack argument..."})
+ testProg(t, "itxn_begin; byte 0x87123376; itxn_field Amount", 5, Expect{1, "...wanted type uint64 got []byte"})
testProg(t, "itxn_begin; int 1; itxn_field Amount", 5)
}
func TestBadInnerFields(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{4, "...is not allowed."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{3, "...Note field was introduced in v6..."})
- testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{3, "...VotePK field was introduced in v6..."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{4, "...is not allowed."})
-
- testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{3, "...is not allowed."})
- testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValidTime", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 5, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 5, Expect{1, "...Note field was introduced in v6..."})
+ testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 5, Expect{1, "...VotePK field was introduced in v6..."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 5, Expect{1, "...is not allowed."})
+
+ testProg(t, "itxn_begin; int 1000; itxn_field FirstValid", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 1000; itxn_field LastValid", 6, Expect{1, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field Lease", 6, Expect{1, "...is not allowed."})
testProg(t, "itxn_begin; byte 0x7263; itxn_field Note", 6)
testProg(t, "itxn_begin; byte 0x7263; itxn_field VotePK", 6)
- testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{4, "...is not allowed."})
+ testProg(t, "itxn_begin; int 32; bzero; itxn_field TxID", 6, Expect{1, "...is not allowed."})
}
func TestTypeTracking(t *testing.T) {
@@ -2573,7 +2584,7 @@ func TestTypeTracking(t *testing.T) {
// but we do want to ensure we're not just treating the code after callsub as dead
testProg(t, "callsub A; int 1; concat; return; A: int 1; int 2; retsub", LogicVersion,
- Expect{3, "concat arg 1 wanted..."})
+ Expect{1, "concat arg 1 wanted..."})
// retsub deadens code, like any unconditional branch
testProg(t, "callsub A; +; return; A: int 1; int 2; retsub; concat", LogicVersion)
@@ -2673,7 +2684,7 @@ func TestMergeProtos(t *testing.T) {
func TestGetSpec(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- ops, _ := AssembleStringWithVersion("int 1", AssemblerMaxVersion)
+ ops := testProg(t, "int 1", AssemblerMaxVersion)
ops.versionedPseudoOps["dummyPseudo"] = make(map[int]OpSpec)
ops.versionedPseudoOps["dummyPseudo"][1] = OpSpec{Name: "b:", Version: AssemblerMaxVersion, Proto: proto("b:")}
ops.versionedPseudoOps["dummyPseudo"][2] = OpSpec{Name: ":", Version: AssemblerMaxVersion}
@@ -2697,7 +2708,7 @@ func TestAddPseudoDocTags(t *testing.T) {
delete(opDocByName, "any")
}()
- pseudoOps["tests"] = map[int]OpSpec{2: OpSpec{Name: "multiple"}, 1: OpSpec{Name: "single"}, 0: OpSpec{Name: "none"}, anyImmediates: OpSpec{Name: "any"}}
+ pseudoOps["tests"] = map[int]OpSpec{2: {Name: "multiple"}, 1: {Name: "single"}, 0: {Name: "none"}, anyImmediates: {Name: "any"}}
addPseudoDocTags()
require.Equal(t, "`multiple` can be called using `tests` with 2 immediates.", opDocByName["multiple"])
require.Equal(t, "`single` can be called using `tests` with 1 immediate.", opDocByName["single"])
@@ -2711,7 +2722,114 @@ func TestReplacePseudo(t *testing.T) {
for v := uint64(replaceVersion); v <= AssemblerMaxVersion; v++ {
testProg(t, "byte 0x0000; byte 0x1234; replace 0", v)
testProg(t, "byte 0x0000; int 0; byte 0x1234; replace", v)
- testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{3, "replace without immediates expects 3 stack arguments but stack height is 2"})
- testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{4, "replace 0 arg 0 wanted type []byte got uint64"})
+ testProg(t, "byte 0x0000; byte 0x1234; replace", v, Expect{1, "replace without immediates expects 3 stack arguments but stack height is 2"})
+ testProg(t, "byte 0x0000; int 0; byte 0x1234; replace 0", v, Expect{1, "replace 0 arg 0 wanted type []byte got uint64"})
+ }
+}
+
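+// checkSame assembles `first` and each of `compares` at `version` (0 means
+// assemblerNoVersion) and asserts they all produce identical bytecode.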
+func checkSame(t *testing.T, version uint64, first string, compares ...string) {
+ t.Helper()
+ if version == 0 {
+ version = assemblerNoVersion
+ }
+ ops := testProg(t, first, version)
+ for _, compare := range compares {
+ other := testProg(t, compare, version)
+ if !bytes.Equal(other.Program, ops.Program) {
+ t.Log(Disassemble(ops.Program))
+ t.Log(Disassemble(other.Program))
+ }
+ assert.Equal(t, ops.Program, other.Program, "%s unlike %s", first, compare)
+ }
+}
+
+func TestSemiColon(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ checkSame(t, AssemblerMaxVersion,
+ "pushint 0 ; pushint 1 ; +; int 3 ; *",
+ "pushint 0\npushint 1\n+\nint 3\n*",
+ "pushint 0; pushint 1; +; int 3; *; // comment; int 2",
+ "pushint 0; ; ; pushint 1 ; +; int 3 ; *//check",
+ )
+
+ checkSame(t, 0,
+ "#pragma version 7\nint 1",
+ "// junk;\n#pragma version 7\nint 1",
+ "// junk;\n #pragma version 7\nint 1",
+ )
+
+ checkSame(t, AssemblerMaxVersion,
+ `byte "test;this"; pop;`,
+ `byte "test;this"; ; pop;`,
+ `byte "test;this";;;pop;`,
+ )
+}
+
+func TestAssembleSwitch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // fail when target doesn't correspond to existing label
+ source := `
+ pushint 1
+ switch label1 label2
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion, Expect{3, "reference to undefined label \"label2\""})
+
+ // fail when target index != uint64
+ testProg(t, `
+ byte "fail"
+ switch label1
+ label1:
+ `, AssemblerMaxVersion, Expect{3, "switch label1 arg 0 wanted type uint64..."})
+
+ // No labels is pretty degenerate, but ok, I suppose. It's just a no-op
+ testProg(t, `
+int 0
+switch
+int 1
+`, AssemblerMaxVersion)
+
+ // confirm arg limit
+ source = `
+ pushint 1
+ switch label1 label2
+ label1:
+ label2:
+ `
+ ops := testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 9) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*2)
+
+ var labels []string
+ for i := 0; i < 255; i++ {
+ labels = append(labels, fmt.Sprintf("label%d", i))
}
+
+ // test that 255 labels is ok
+ source = fmt.Sprintf(`
+ pushint 1
+ switch %s
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion)
+ require.Len(t, ops.Program, 515) // ver (1) + pushint (2) + opcode (1) + length (1) + labels (2*255)
+
+ // 256 is too many
+ source = fmt.Sprintf(`
+ pushint 1
+ switch %s extra
+ %s
+ `, strings.Join(labels, " "), strings.Join(labels, ":\n")+":\n")
+ ops = testProg(t, source, AssemblerMaxVersion, Expect{3, "switch cannot take more than 255 labels"})
+
+ // allow duplicate label reference
+ source = `
+ pushint 1
+ switch label1 label1
+ label1:
+ `
+ testProg(t, source, AssemblerMaxVersion)
}
diff --git a/data/transactions/logic/backwardCompat_test.go b/data/transactions/logic/backwardCompat_test.go
index 086741dcde..13a19ddaa7 100644
--- a/data/transactions/logic/backwardCompat_test.go
+++ b/data/transactions/logic/backwardCompat_test.go
@@ -467,15 +467,15 @@ func TestBackwardCompatAssemble(t *testing.T) {
source := "int 1; int 1; bnz done; done:"
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, assemblerNoVersion, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, assemblerNoVersion, Expect{1, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 0, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, 0, Expect{1, "label \"done\" is too far away"})
})
t.Run("v=default", func(t *testing.T) {
- testProg(t, source, 1, Expect{4, "label \"done\" is too far away"})
+ testProg(t, source, 1, Expect{1, "label \"done\" is too far away"})
})
for v := uint64(2); v <= AssemblerMaxVersion; v++ {
diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go
index 1a43995c21..bfcf927f0a 100644
--- a/data/transactions/logic/doc.go
+++ b/data/transactions/logic/doc.go
@@ -192,7 +192,9 @@ var opDocByName = map[string]string{
"itxn_submit": "execute the current inner transaction group. Fail if executing this group would exceed the inner transaction limit, or if any transaction in the group fails.",
"vrf_verify": "Verify the proof B of message A against pubkey C. Returns vrf output and verification flag.",
- "block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
+ "block": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
+
+ "switch": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
}
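The fall-through rule in that doc string is easiest to see in a test-style sketch. This is hypothetical (not part of the patch), written against the existing testAccepts helper from eval_test.go; switch lands in version 8:

// Index 5 exceeds the two labels, so execution continues at the
// instruction right after the switch.
testAccepts(t, `
int 5
switch lbl0 lbl1
int 1; return
lbl0: err
lbl1: err
`, 8)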
// OpDoc returns a description of the op
@@ -261,6 +263,8 @@ var opcodeImmediateNotes = map[string]string{
"vrf_verify": "{uint8 parameters index}",
"block": "{uint8 block field}",
+
+ "switch": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
}
// OpImmediateNote returns a short string about immediate data which follows the op byte
@@ -339,7 +343,7 @@ var OpGroups = map[string][]string{
"Byte Array Arithmetic": {"b+", "b-", "b/", "b*", "b<", "b>", "b<=", "b>=", "b==", "b!=", "b%", "bsqrt"},
"Byte Array Logic": {"b|", "b&", "b^", "b~"},
"Loading Values": {"intcblock", "intc", "intc_0", "intc_1", "intc_2", "intc_3", "pushint", "bytecblock", "bytec", "bytec_0", "bytec_1", "bytec_2", "bytec_3", "pushbytes", "bzero", "arg", "arg_0", "arg_1", "arg_2", "arg_3", "args", "txn", "gtxn", "txna", "txnas", "gtxna", "gtxnas", "gtxns", "gtxnsa", "gtxnsas", "global", "load", "loads", "store", "stores", "gload", "gloads", "gloadss", "gaid", "gaids"},
- "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub"},
+ "Flow Control": {"err", "bnz", "bz", "b", "return", "pop", "dup", "dup2", "dig", "cover", "uncover", "swap", "select", "assert", "callsub", "retsub", "switch"},
"State Access": {"balance", "min_balance", "app_opted_in", "app_local_get", "app_local_get_ex", "app_global_get", "app_global_get_ex", "app_local_put", "app_global_put", "app_local_del", "app_global_del", "asset_holding_get", "asset_params_get", "app_params_get", "acct_params_get", "log", "block"},
"Inner Transactions": {"itxn_begin", "itxn_next", "itxn_field", "itxn_submit", "itxn", "itxna", "itxnas", "gitxn", "gitxna", "gitxnas"},
}
diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go
index b648ff7781..5e4cf4c377 100644
--- a/data/transactions/logic/eval.go
+++ b/data/transactions/logic/eval.go
@@ -1341,17 +1341,17 @@ func opLt(cx *EvalContext) error {
// opSwap, opLt, and opNot always succeed (return nil). So error checking elided in Gt,Le,Ge
func opGt(cx *EvalContext) error {
- opSwap(cx)
+ opSwap(cx) //nolint:errcheck // opSwap always succeeds
return opLt(cx)
}
func opLe(cx *EvalContext) error {
- opGt(cx)
+ opGt(cx) //nolint:errcheck // opGt always succeeds
return opNot(cx)
}
func opGe(cx *EvalContext) error {
- opLt(cx)
+ opLt(cx) //nolint:errcheck // opLt always succeeds
return opNot(cx)
}
@@ -1965,12 +1965,17 @@ func opArgs(cx *EvalContext) error {
return opArgN(cx, n)
}
+func decodeBranchOffset(program []byte, pos int) int {
+ // tricky casting to preserve signed value
+ return int(int16(program[pos])<<8 | int16(program[pos+1]))
+}
+
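The "tricky casting" above merits a worked example. A standalone sketch (assuming nothing beyond the function itself) showing that the byte pair is read as a big-endian signed 16-bit value:

package main

import "fmt"

// Same rule as decodeBranchOffset above: converting the high byte to
// int16 and shifting left by 8 deliberately overflows into the sign
// bit, so 0xff 0xf6 decodes to -10 rather than 65526.
func decodeBranchOffset(program []byte, pos int) int {
	return int(int16(program[pos])<<8 | int16(program[pos+1]))
}

func main() {
	fmt.Println(decodeBranchOffset([]byte{0x00, 0x0a}, 0)) // 10
	fmt.Println(decodeBranchOffset([]byte{0xff, 0xf6}, 0)) // -10
}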
func branchTarget(cx *EvalContext) (int, error) {
- offset := int16(uint16(cx.program[cx.pc+1])<<8 | uint16(cx.program[cx.pc+2]))
+ offset := decodeBranchOffset(cx.program, cx.pc+1)
if offset < 0 && cx.version < backBranchEnabledVersion {
return 0, fmt.Errorf("negative branch offset %x", offset)
}
- target := cx.pc + 3 + int(offset)
+ target := cx.pc + 3 + offset
var branchTooFar bool
if cx.version >= 2 {
// branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction, is okay and ends normally
@@ -1985,6 +1990,32 @@ func branchTarget(cx *EvalContext) (int, error) {
return target, nil
}
+func switchTarget(cx *EvalContext, branchIdx uint64) (int, error) {
+ numOffsets := int(cx.program[cx.pc+1])
+
+ end := cx.pc + 2 // end of opcode + number of offsets, beginning of offset list
+ eoi := end + 2*numOffsets // end of instruction
+
+ if eoi > len(cx.program) { // eoi will equal len(p) if switch is last instruction
+ return 0, fmt.Errorf("switch claims to extend beyond program")
+ }
+
+ offset := 0
+ if branchIdx < uint64(numOffsets) {
+ pos := end + int(2*branchIdx) // position of referenced offset: each offset is 2 bytes
+ offset = decodeBranchOffset(cx.program, pos)
+ }
+
+ target := eoi + offset
+
+ // branching to exactly the end of the program (target == len(cx.program)), the next pc after the last instruction,
+ // is okay and ends normally
+ if target > len(cx.program) || target < 0 {
+ return 0, fmt.Errorf("branch target %d outside of program", target)
+ }
+ return target, nil
+}
+
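The pc arithmetic is easier to follow against concrete bytes. This hand-assembled sketch is an assumption, but it is consistent with the 9-byte size assertion in TestAssembleSwitch (ver + pushint + opcode + count + two int16 offsets):

package main

import "fmt"

func decodeBranchOffset(program []byte, pos int) int {
	return int(int16(program[pos])<<8 | int16(program[pos+1]))
}

func main() {
	// "pushint 1; switch l0 l1; l0:; l1:" -- both offsets are 0
	// because the labels sit immediately after the instruction.
	program := []byte{0x08, 0x81, 0x01, 0x8a, 0x02, 0x00, 0x00, 0x00, 0x00}
	pc := 3                          // the switch opcode (0x8a)
	numOffsets := int(program[pc+1]) // 2
	end := pc + 2                    // 5: first byte of the offset list
	eoi := end + 2*numOffsets        // 9: end of instruction
	for idx := 0; idx < numOffsets; idx++ {
		target := eoi + decodeBranchOffset(program, end+2*idx)
		// Both targets are 9 == len(program): the legal "end of
		// program" jump that switchTarget permits.
		fmt.Println("branch", idx, "->", target)
	}
}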
// checks any branch that is {op} {int16 be offset}
func checkBranch(cx *EvalContext) error {
target, err := branchTarget(cx)
@@ -2000,6 +2031,32 @@ func checkBranch(cx *EvalContext) error {
cx.branchTargets[target] = true
return nil
}
+
+// checks switch is encoded properly (and calculates nextpc)
+func checkSwitch(cx *EvalContext) error {
+ numOffsets := int(cx.program[cx.pc+1])
+ eoi := cx.pc + 2 + 2*numOffsets
+
+ for branchIdx := 0; branchIdx < numOffsets; branchIdx++ {
+ target, err := switchTarget(cx, uint64(branchIdx))
+ if err != nil {
+ return err
+ }
+
+ if target < eoi {
+ // If a branch goes backwards, we should have already noted that an instruction began at that location.
+ if _, ok := cx.instructionStarts[target]; !ok {
+ return fmt.Errorf("back branch target %d is not an aligned instruction", target)
+ }
+ }
+ cx.branchTargets[target] = true
+ }
+
+ // this opcode's size is dynamic so nextpc must be set here
+ cx.nextpc = eoi
+ return nil
+}
+
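For a concrete case the alignment rule catches, consider the hand-crafted bytes 0x08 0x81 0x05 0x8a 0x01 0xff 0xfb (assumed for illustration; the assembler never emits them): pushint 5 occupies indexes 1-2, the one-label switch starts at pc 3, and the single offset decodes to -5. With eoi = 7, the target is 7 - 5 = 2, the middle of pushint's immediate. Index 2 was never recorded in instructionStarts during the linear scan, so checkSwitch fails with "back branch target 2 is not an aligned instruction" rather than allowing immediate data to be re-executed as an opcode.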
func opBnz(cx *EvalContext) error {
last := len(cx.stack) - 1
cx.nextpc = cx.pc + 3
@@ -2039,6 +2096,19 @@ func opB(cx *EvalContext) error {
return nil
}
+func opSwitch(cx *EvalContext) error {
+ last := len(cx.stack) - 1
+ branchIdx := cx.stack[last].Uint
+
+ cx.stack = cx.stack[:last]
+ target, err := switchTarget(cx, branchIdx)
+ if err != nil {
+ return err
+ }
+ cx.nextpc = target
+ return nil
+}
+
func opCallSub(cx *EvalContext) error {
cx.callstack = append(cx.callstack, cx.pc+3)
return opB(cx)
diff --git a/data/transactions/logic/evalCrypto_test.go b/data/transactions/logic/evalCrypto_test.go
index b2c6bec0e5..773330fab3 100644
--- a/data/transactions/logic/evalCrypto_test.go
+++ b/data/transactions/logic/evalCrypto_test.go
@@ -109,6 +109,9 @@ byte 0x%s
}
func TestVrfVerify(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
ep, _, _ := makeSampleEnv()
testApp(t, notrack("int 1; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 0 wanted")
testApp(t, notrack("byte 0x1122; int 2; int 3; vrf_verify VrfAlgorand"), ep, "arg 1 wanted")
diff --git a/data/transactions/logic/evalStateful_test.go b/data/transactions/logic/evalStateful_test.go
index 3e051fc6be..1652702dfb 100644
--- a/data/transactions/logic/evalStateful_test.go
+++ b/data/transactions/logic/evalStateful_test.go
@@ -314,7 +314,7 @@ func TestBalance(t *testing.T) {
text = `txn Accounts 1; balance; int 177; ==;`
// won't assemble in old version teal
- testProg(t, text, directRefEnabledVersion-1, Expect{2, "balance arg 0 wanted type uint64..."})
+ testProg(t, text, directRefEnabledVersion-1, Expect{1, "balance arg 0 wanted type uint64..."})
// but legal after that
testApp(t, text, ep)
@@ -475,7 +475,7 @@ func TestMinBalance(t *testing.T) {
testApp(t, "int 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion-1,
- Expect{2, "min_balance arg 0 wanted type uint64..."})
+ Expect{1, "min_balance arg 0 wanted type uint64..."})
testProg(t, "txn Accounts 1; min_balance; int 1001; ==", directRefEnabledVersion)
testApp(t, "txn Accounts 1; min_balance; int 1001; ==", ep) // 1 == Accounts[0]
// Receiver opts in
@@ -528,7 +528,7 @@ func TestAppCheckOptedIn(t *testing.T) {
testApp(t, "int 1; int 2; app_opted_in; int 0; ==", pre) // in pre, int 2 is an actual app id
testApp(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", now)
testProg(t, "byte \"aoeuiaoeuiaoeuiaoeuiaoeuiaoeui01\"; int 2; app_opted_in; int 1; ==", directRefEnabledVersion-1,
- Expect{3, "app_opted_in arg 0 wanted type uint64..."})
+ Expect{1, "app_opted_in arg 0 wanted type uint64..."})
// Receiver opts into 888, the current app in testApp
ledger.NewLocals(txn.Txn.Receiver, 888)
@@ -939,7 +939,7 @@ func testAssetsByVersion(t *testing.T, assetsTestProgram string, version uint64)
// it wasn't legal to use a direct ref for account
testProg(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 54; asset_holding_get AssetBalance`,
- directRefEnabledVersion-1, Expect{3, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
+ directRefEnabledVersion-1, Expect{1, "asset_holding_get AssetBalance arg 0 wanted type uint64..."})
// but it is now (empty asset yields 0,0 on stack)
testApp(t, `byte "aoeuiaoeuiaoeuiaoeuiaoeuiaoeui00"; int 55; asset_holding_get AssetBalance; ==`, now)
// This is receiver, who is in Assets array
@@ -2436,6 +2436,8 @@ func TestReturnTypes(t *testing.T) {
cmd += " 0x12 0x34 0x56"
case immLabel:
cmd += " done; done: ;"
+ case immLabels:
+ cmd += " done1 done2; done1: ; done2: ;"
default:
require.Fail(t, "bad immediate", "%s", imm)
}
@@ -2536,6 +2538,9 @@ func TestLatestTimestamp(t *testing.T) {
}
func TestBlockSeed(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
ep, txn, l := makeSampleEnv()
// makeSampleEnv creates txns with fv, lv that don't actually fit the round
diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go
index ba7df73e9c..64c6c04808 100644
--- a/data/transactions/logic/eval_test.go
+++ b/data/transactions/logic/eval_test.go
@@ -340,12 +340,12 @@ func TestSimpleMath(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testAccepts(t, "int 2; int 3; + ;int 5;==", 1)
- testAccepts(t, "int 22; int 3; - ;int 19;==", 1)
- testAccepts(t, "int 8; int 7; * ;int 56;==", 1)
- testAccepts(t, "int 21; int 7; / ;int 3;==", 1)
+ testAccepts(t, "int 2; int 3; + ; int 5; ==", 1)
+ testAccepts(t, "int 22; int 3; - ; int 19; ==", 1)
+ testAccepts(t, "int 8; int 7; * ; int 56; ==", 1)
+ testAccepts(t, "int 21; int 7; / ; int 3; ==", 1)
- testPanics(t, "int 1; int 2; - ;int 0; ==", 1)
+ testPanics(t, "int 1; int 2; - ; int 0; ==", 1)
}
func TestSha256EqArg(t *testing.T) {
@@ -896,6 +896,18 @@ func TestBytecTooFar(t *testing.T) {
testPanics(t, "byte 0x23; bytec_1; btoi", 1)
}
+func TestManualCBlockEval(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // TestManualCBlock in assembler_test.go demonstrates that these will use
+ // an inserted constant block.
+ testAccepts(t, "int 4; int 4; +; int 8; ==; return; intcblock 10", 2)
+ testAccepts(t, "b skip; intcblock 10; skip: int 4; int 4; +; int 8; ==;", 2)
+ testAccepts(t, "byte 0x2222; byte 0x2222; concat; len; int 4; ==; return; bytecblock 0x11", 2)
+ testAccepts(t, "b skip; bytecblock 0x11; skip: byte 0x2222; byte 0x2222; concat; len; int 4; ==", 2)
+}
+
func TestTxnBadField(t *testing.T) {
partitiontest.PartitionTest(t)
@@ -962,7 +974,7 @@ func TestArg(t *testing.T) {
t.Parallel()
for v := uint64(1); v <= AssemblerMaxVersion; v++ {
t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) {
- source := "arg 0; arg 1; ==; arg 2; arg 3; !=; &&; arg 4; len; int 9; <; &&;"
+ source := "arg 0; arg 1; ==; arg 2; arg 3; !=; &&; arg 4; len; int 9; <; &&; "
if v >= 5 {
source += "int 0; args; int 1; args; ==; assert; int 2; args; int 3; args; !=; assert"
}
@@ -2836,16 +2848,16 @@ func TestSlowLogic(t *testing.T) {
t.Parallel()
fragment := `byte 0x666E6F7264; keccak256
- byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==;`
+ byte 0xc195eca25a6f4c82bfba0287082ddb0d602ae9230f9cf1f1a40b68f8e2c41567; ==; `
// Sanity check. Running a short sequence of these fragments passes in all versions.
- source := fragment + strings.Repeat(fragment+"&&;", 5)
+ source := fragment + strings.Repeat(fragment+"&&; ", 5)
testAccepts(t, source, 1)
// in v1, each repeat costs 30
- v1overspend := fragment + strings.Repeat(fragment+"&&;", 20000/30)
+ v1overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/30)
// in v2,v3 each repeat costs 134
- v2overspend := fragment + strings.Repeat(fragment+"&&;", 20000/134)
+ v2overspend := fragment + strings.Repeat(fragment+"&&; ", 20000/134)
// v1overspend fails (on v1)
ops := testProg(t, v1overspend, 1)
@@ -3546,8 +3558,7 @@ func benchmarkOperation(b *testing.B, prefix string, operation string, suffix st
b.Helper()
runs := 1 + b.N/2000
inst := strings.Count(operation, ";") + strings.Count(operation, "\n")
- source := prefix + ";" + strings.Repeat(operation+";", 2000) + ";" + suffix
- source = strings.ReplaceAll(source, ";", "\n")
+ source := prefix + ";" + strings.Repeat(operation+"\n", 2000) + ";" + suffix
ops := testProg(b, source, AssemblerMaxVersion)
evalLoop(b, runs, ops.Program)
b.ReportMetric(float64(inst), "extra/op")
@@ -3860,9 +3871,9 @@ func TestStackOverflow(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- source := "int 1; int 2;"
+ source := "int 1; int 2; "
for i := 1; i < maxStackDepth/2; i++ {
- source += "dup2;"
+ source += "dup2; "
}
testAccepts(t, source+"return", 2)
testPanics(t, source+"dup2; return", 2)
@@ -4266,11 +4277,11 @@ func TestAssert(t *testing.T) {
partitiontest.PartitionTest(t)
t.Parallel()
- testAccepts(t, "int 1;assert;int 1", 3)
- testRejects(t, "int 1;assert;int 0", 3)
- testPanics(t, "int 0;assert;int 1", 3)
- testPanics(t, notrack("assert;int 1"), 3)
- testPanics(t, notrack(`byte "john";assert;int 1`), 3)
+ testAccepts(t, "int 1; assert; int 1", 3)
+ testRejects(t, "int 1; assert; int 0", 3)
+ testPanics(t, "int 0; assert; int 1", 3)
+ testPanics(t, notrack("assert; int 1"), 3)
+ testPanics(t, notrack(`byte "john"; assert; int 1`), 3)
}
func TestBits(t *testing.T) {
@@ -4766,7 +4777,7 @@ func TestLog(t *testing.T) {
loglen: 2,
},
{
- source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log;`, maxLogCalls)),
+ source: fmt.Sprintf(`%s int 1`, strings.Repeat(`byte "a logging message"; log; `, maxLogCalls)),
loglen: maxLogCalls,
},
{
@@ -4811,7 +4822,7 @@ func TestLog(t *testing.T) {
runMode: modeApp,
},
{
- source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log;`, maxLogCalls+1)),
+ source: fmt.Sprintf(`%s; int 1`, strings.Repeat(`byte "a"; log; `, maxLogCalls+1)),
errContains: "too many log calls",
runMode: modeApp,
},
@@ -5123,7 +5134,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5131,7 +5142,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615; //max uint64 value
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5139,7 +5150,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"\\u0061\\u006C\\u0067\\u006F\",\"key2\":{\"key3\": \"teal\", \"key4\": 3}, \"key5\": 18446744073709551615 }";
@@ -5147,7 +5158,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "algo";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5159,7 +5170,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
int 10
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5169,7 +5180,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "teal"
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"\\"teal\\"\", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5179,7 +5190,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte ""teal"" // quotes match
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \" teal \", \"key4\": {\"key40\": 10}}, \"key5\": 18446744073709551615 }";
@@ -5189,7 +5200,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " teal " // spaces match
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"10\"}}, \"key5\": 18446744073709551615 }";
@@ -5200,7 +5211,7 @@ func TestOpJSONRef(t *testing.T) {
byte "{\"key40\": 10, \"key40\": \"10\"}"
==
`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5208,7 +5219,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5216,7 +5227,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONObject;
byte "{\"attestationObject\": \"based64url_encoded_buffer\",\"clientD\\u0061taJSON\": \" based64url_encoded_client_data\"}" // object as it appeared in input
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"rawId\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5226,7 +5237,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte " based64url_encoded_client_data";
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"\\u0072\\u0061\\u0077\\u0049\\u0044\": \"responseId\",\"id\": \"0\",\"response\": {\"attestationObject\": \"based64url_encoded_buffer\",\"clientDataJSON\": \" based64url_encoded_client_data\"},\"getClientExtensionResults\": {},\"type\": \"public-key\"}";
@@ -5234,7 +5245,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString;
byte "responseId"
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// JavaScript MAX_SAFE_INTEGER
{
@@ -5243,7 +5254,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 9007199254740991;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// maximum uint64
{
@@ -5252,7 +5263,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 18446744073709551615;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// larger-than-uint64s are allowed if not requested
{
@@ -5261,7 +5272,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64;
int 0;
==`,
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
}
@@ -5301,52 +5312,52 @@ func TestOpJSONRef(t *testing.T) {
{
source: `byte "{\"key0\": 1 }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal number into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [1] }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal array into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONString;`,
error: "json: cannot unmarshal object into Go value of type string",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": \"1\" }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal string into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [\"1\"] }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal array into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": {\"key1\":1} }"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal object into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": [1]}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal array into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal number into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": \"1\"}"; byte "key0"; json_ref JSONObject;`,
error: "json: cannot unmarshal string into Go value of type map[string]json.RawMessage",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]} }"; byte "key3"; json_ref JSONString;`,
error: "key key3 not found in JSON text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": [1,2,3]}}";
@@ -5356,52 +5367,52 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "key key5 not found in JSON text",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": -0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -0 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1e10,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1e10 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0.2e-2,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 0.2e-2 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 1.0 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key1"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 2.5 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1.0,\"key1\": 2.5,\"key2\": -3}"; byte "key2"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number -3 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 18446744073709551616}"; byte "key0"; json_ref JSONUint64;`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1,}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 1, \"key0\": \"3\"}"; byte "key0"; json_ref JSONString;`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{1, "unknown opcode: json_ref"}},
},
{
source: `byte "{\"key0\": 0,\"key1\": \"algo\",\"key2\":{\"key3\": \"teal\", \"key4\": {\"key40\": 10, \"key40\": \"should fail!\"}}}";
@@ -5413,7 +5424,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONString
`,
error: "error while parsing JSON text, invalid json text, duplicate keys not allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {13, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}, {5, "unknown opcode: json_ref"}, {7, "unknown opcode: json_ref"}},
},
{
source: `byte "[1,2,3]";
@@ -5421,7 +5432,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "2";
@@ -5429,7 +5440,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "null";
@@ -5437,7 +5448,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "true";
@@ -5445,7 +5456,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "\"sometext\"";
@@ -5453,7 +5464,7 @@ func TestOpJSONRef(t *testing.T) {
json_ref JSONUint64
`,
error: "error while parsing JSON text, invalid json text, only json object is allowed",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
{
source: `byte "{noquotes: \"shouldn't work\"}";
@@ -5462,7 +5473,7 @@ func TestOpJSONRef(t *testing.T) {
byte "shouldn't work";
==`,
error: "error while parsing JSON text, invalid json text",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
// max uint64 + 1 should fail
{
@@ -5472,7 +5483,7 @@ func TestOpJSONRef(t *testing.T) {
int 1;
return`,
error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64",
- previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}},
+ previousVersErrors: []Expect{{3, "unknown opcode: json_ref"}},
},
}
@@ -5510,6 +5521,90 @@ func TestOpJSONRef(t *testing.T) {
}
func TestTypeComplaints(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
testProg(t, "err; store 0", AssemblerMaxVersion)
testProg(t, "int 1; return; store 0", AssemblerMaxVersion)
}
+
+func TestSwitchInt(t *testing.T) {
+ partitiontest.PartitionTest(t)
+ t.Parallel()
+
+ // take the 0th label
+ testAccepts(t, `
+int 0
+switch zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // take the 1th label
+ testRejects(t, `
+int 1
+switch zero one
+err
+zero: int 1; return
+one: int 0;
+`, 8)
+
+ // same, but jumping to end of program
+ testAccepts(t, `
+int 1; dup
+switch zero one
+zero: err
+one:
+`, 8)
+
+ // no match
+ testAccepts(t, `
+int 2
+switch zero one
+int 1; return // falls through to here
+zero: int 0; return
+one: int 0; return
+`, 8)
+
+ // jump forward and backward
+ testAccepts(t, `
+int 0
+start:
+int 1
++
+dup
+int 1
+-
+switch start end
+err
+end:
+int 2
+==
+assert
+int 1
+`, 8)
+
+ // 0 labels are allowed, but weird!
+ testAccepts(t, `
+int 0
+switch
+int 1
+`, 8)
+
+ testPanics(t, notrack("switch; int 1"), 8)
+
+ // make the switch the final instruction
+ testAccepts(t, `
+int 1
+int 0
+switch done1 done2; done1: ; done2: ;
+`, 8)
+
+ // make the switch the final instruction, and don't match
+ testAccepts(t, `
+int 1
+int 88
+switch done1 done2; done1: ; done2: ;
+`, 8)
+}
diff --git a/data/transactions/logic/fields_test.go b/data/transactions/logic/fields_test.go
index 0a29288134..2b5008f5c8 100644
--- a/data/transactions/logic/fields_test.go
+++ b/data/transactions/logic/fields_test.go
@@ -225,7 +225,7 @@ func TestAssetParamsFieldsVersions(t *testing.T) {
ep, _, _ := makeSampleEnv()
ep.Proto.LogicSigVersion = v
if field.version > v {
- testProg(t, text, v, Expect{3, "...was introduced in..."})
+ testProg(t, text, v, Expect{1, "...was introduced in..."})
ops := testProg(t, text, field.version) // assemble in the future
ops.Program[0] = byte(v)
testAppBytes(t, ops.Program, ep, "invalid asset_params_get field")
diff --git a/data/transactions/logic/langspec.json b/data/transactions/logic/langspec.json
index 4e29b9a88b..58f3ded24a 100644
--- a/data/transactions/logic/langspec.json
+++ b/data/transactions/logic/langspec.json
@@ -1,5 +1,5 @@
{
- "EvalMaxVersion": 7,
+ "EvalMaxVersion": 8,
"LogicSigVersion": 7,
"Ops": [
{
@@ -1576,6 +1576,17 @@
"Flow Control"
]
},
+ {
+ "Opcode": 138,
+ "Name": "switch",
+ "Args": "U",
+ "Size": 0,
+ "Doc": "branch to the Ath label. Continue at following instruction if index A exceeds the number of labels.",
+ "ImmediateNote": "{uint8 branch count} [{int16 branch offset, big-endian}, ...]",
+ "Groups": [
+ "Flow Control"
+ ]
+ },
{
"Opcode": 144,
"Name": "shl",
@@ -2297,7 +2308,7 @@
"Args": "U",
"Returns": ".",
"Size": 2,
- "Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and the current round (exclusive)",
+ "Doc": "field F of block A. Fail unless A falls between txn.LastValid-1002 and txn.FirstValid (exclusive)",
"ImmediateNote": "{uint8 block field}",
"Groups": [
"State Access"
diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go
index dc5627422e..1e3dcfc5b6 100644
--- a/data/transactions/logic/opcodes.go
+++ b/data/transactions/logic/opcodes.go
@@ -68,7 +68,7 @@ const randomnessVersion = 7 // vrf_verify, block
// EXPERIMENTAL. These should be revisited whenever a new LogicSigVersion is
// moved from vFuture to a new consensus version. If they remain unready, bump
// their version, and fixup TestAssemble() in assembler_test.go.
-const pairingVersion = 8 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
+const pairingVersion = 9 // bn256 opcodes. will add bls12-381, and unify the available opcodes.
type linearCost struct {
baseCost int
@@ -167,7 +167,7 @@ func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int {
return cost
}
-func opDefault() OpDetails {
+func detDefault() OpDetails {
return OpDetails{asmDefault, nil, nil, modeAny, linearCost{baseCost: 1}, 1, nil}
}
@@ -175,8 +175,8 @@ func constants(asm asmFunc, checker checkFunc, name string, kind immKind) OpDeta
return OpDetails{asm, checker, nil, modeAny, linearCost{baseCost: 1}, 0, []immediate{imm(name, kind)}}
}
-func opBranch() OpDetails {
- d := opDefault()
+func detBranch() OpDetails {
+ d := detDefault()
d.asm = asmBranch
d.check = checkBranch
d.Size = 3
@@ -184,8 +184,17 @@ func opBranch() OpDetails {
return d
}
+func detSwitch() OpDetails {
+ d := detDefault()
+ d.asm = asmSwitch
+ d.check = checkSwitch
+ d.Size = 0
+ d.Immediates = []immediate{imm("target ...", immLabels)}
+ return d
+}
+
func assembler(asm asmFunc) OpDetails {
- d := opDefault()
+ d := detDefault()
d.asm = asm
return d
}
@@ -197,7 +206,7 @@ func (d OpDetails) assembler(asm asmFunc) OpDetails {
}
func costly(cost int) OpDetails {
- d := opDefault()
+ d := detDefault()
d.FullCost.baseCost = cost
return d
}
@@ -209,7 +218,7 @@ func (d OpDetails) costs(cost int) OpDetails {
}
func only(m runMode) OpDetails {
- d := opDefault()
+ d := detDefault()
d.Modes = m
return d
}
@@ -227,7 +236,7 @@ func (d OpDetails) costByLength(initial, perChunk, chunkSize, depth int) OpDetai
}
func immediates(names ...string) OpDetails {
- d := opDefault()
+ d := detDefault()
d.Size = len(names) + 1
d.Immediates = make([]immediate, len(names))
for i, name := range names {
@@ -273,7 +282,7 @@ func costByLength(initial, perChunk, chunkSize, depth int) OpDetails {
if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize {
panic("bad cost configuration")
}
- d := opDefault()
+ d := detDefault()
d.FullCost = linearCost{initial, perChunk, chunkSize, depth}
return d
}
@@ -288,6 +297,7 @@ const (
immBytes
immInts
immBytess // "ss" not a typo. Multiple "bytes"
+ immLabels
)
type immediate struct {
@@ -369,7 +379,7 @@ func (spec *OpSpec) deadens() bool {
// Note: assembly can specialize an Any return type if known at
// assembly-time, with ops.returns()
var OpSpecs = []OpSpec{
- {0x00, "err", opErr, proto(":x"), 1, opDefault()},
+ {0x00, "err", opErr, proto(":x"), 1, detDefault()},
{0x01, "sha256", opSHA256, proto("b:b"), 1, costly(7)},
{0x02, "keccak256", opKeccak256, proto("b:b"), 1, costly(26)},
{0x03, "sha512_256", opSHA512_256, proto("b:b"), 1, costly(9)},
@@ -399,43 +409,43 @@ var OpSpecs = []OpSpec{
{0x06, "ecdsa_pk_decompress", opEcdsaPkDecompress, proto("b:bb"), 5, costByField("v", &EcdsaCurves, ecdsaDecompressCosts)},
{0x07, "ecdsa_pk_recover", opEcdsaPkRecover, proto("bibb:bb"), 5, field("v", &EcdsaCurves).costs(2000)},
- {0x08, "+", opPlus, proto("ii:i"), 1, opDefault()},
- {0x09, "-", opMinus, proto("ii:i"), 1, opDefault()},
- {0x0a, "/", opDiv, proto("ii:i"), 1, opDefault()},
- {0x0b, "*", opMul, proto("ii:i"), 1, opDefault()},
- {0x0c, "<", opLt, proto("ii:i"), 1, opDefault()},
- {0x0d, ">", opGt, proto("ii:i"), 1, opDefault()},
- {0x0e, "<=", opLe, proto("ii:i"), 1, opDefault()},
- {0x0f, ">=", opGe, proto("ii:i"), 1, opDefault()},
- {0x10, "&&", opAnd, proto("ii:i"), 1, opDefault()},
- {0x11, "||", opOr, proto("ii:i"), 1, opDefault()},
+ {0x08, "+", opPlus, proto("ii:i"), 1, detDefault()},
+ {0x09, "-", opMinus, proto("ii:i"), 1, detDefault()},
+ {0x0a, "/", opDiv, proto("ii:i"), 1, detDefault()},
+ {0x0b, "*", opMul, proto("ii:i"), 1, detDefault()},
+ {0x0c, "<", opLt, proto("ii:i"), 1, detDefault()},
+ {0x0d, ">", opGt, proto("ii:i"), 1, detDefault()},
+ {0x0e, "<=", opLe, proto("ii:i"), 1, detDefault()},
+ {0x0f, ">=", opGe, proto("ii:i"), 1, detDefault()},
+ {0x10, "&&", opAnd, proto("ii:i"), 1, detDefault()},
+ {0x11, "||", opOr, proto("ii:i"), 1, detDefault()},
{0x12, "==", opEq, proto("aa:i"), 1, stacky(typeEquals)},
{0x13, "!=", opNeq, proto("aa:i"), 1, stacky(typeEquals)},
- {0x14, "!", opNot, proto("i:i"), 1, opDefault()},
- {0x15, "len", opLen, proto("b:i"), 1, opDefault()},
- {0x16, "itob", opItob, proto("i:b"), 1, opDefault()},
- {0x17, "btoi", opBtoi, proto("b:i"), 1, opDefault()},
- {0x18, "%", opModulo, proto("ii:i"), 1, opDefault()},
- {0x19, "|", opBitOr, proto("ii:i"), 1, opDefault()},
- {0x1a, "&", opBitAnd, proto("ii:i"), 1, opDefault()},
- {0x1b, "^", opBitXor, proto("ii:i"), 1, opDefault()},
- {0x1c, "~", opBitNot, proto("i:i"), 1, opDefault()},
- {0x1d, "mulw", opMulw, proto("ii:ii"), 1, opDefault()},
- {0x1e, "addw", opAddw, proto("ii:ii"), 2, opDefault()},
+ {0x14, "!", opNot, proto("i:i"), 1, detDefault()},
+ {0x15, "len", opLen, proto("b:i"), 1, detDefault()},
+ {0x16, "itob", opItob, proto("i:b"), 1, detDefault()},
+ {0x17, "btoi", opBtoi, proto("b:i"), 1, detDefault()},
+ {0x18, "%", opModulo, proto("ii:i"), 1, detDefault()},
+ {0x19, "|", opBitOr, proto("ii:i"), 1, detDefault()},
+ {0x1a, "&", opBitAnd, proto("ii:i"), 1, detDefault()},
+ {0x1b, "^", opBitXor, proto("ii:i"), 1, detDefault()},
+ {0x1c, "~", opBitNot, proto("i:i"), 1, detDefault()},
+ {0x1d, "mulw", opMulw, proto("ii:ii"), 1, detDefault()},
+ {0x1e, "addw", opAddw, proto("ii:ii"), 2, detDefault()},
{0x1f, "divmodw", opDivModw, proto("iiii:iiii"), 4, costly(20)},
{0x20, "intcblock", opIntConstBlock, proto(":"), 1, constants(asmIntCBlock, checkIntConstBlock, "uint ...", immInts)},
{0x21, "intc", opIntConstLoad, proto(":i"), 1, immediates("i").assembler(asmIntC)},
- {0x22, "intc_0", opIntConst0, proto(":i"), 1, opDefault()},
- {0x23, "intc_1", opIntConst1, proto(":i"), 1, opDefault()},
- {0x24, "intc_2", opIntConst2, proto(":i"), 1, opDefault()},
- {0x25, "intc_3", opIntConst3, proto(":i"), 1, opDefault()},
+ {0x22, "intc_0", opIntConst0, proto(":i"), 1, detDefault()},
+ {0x23, "intc_1", opIntConst1, proto(":i"), 1, detDefault()},
+ {0x24, "intc_2", opIntConst2, proto(":i"), 1, detDefault()},
+ {0x25, "intc_3", opIntConst3, proto(":i"), 1, detDefault()},
{0x26, "bytecblock", opByteConstBlock, proto(":"), 1, constants(asmByteCBlock, checkByteConstBlock, "bytes ...", immBytess)},
{0x27, "bytec", opByteConstLoad, proto(":b"), 1, immediates("i").assembler(asmByteC)},
- {0x28, "bytec_0", opByteConst0, proto(":b"), 1, opDefault()},
- {0x29, "bytec_1", opByteConst1, proto(":b"), 1, opDefault()},
- {0x2a, "bytec_2", opByteConst2, proto(":b"), 1, opDefault()},
- {0x2b, "bytec_3", opByteConst3, proto(":b"), 1, opDefault()},
+ {0x28, "bytec_0", opByteConst0, proto(":b"), 1, detDefault()},
+ {0x29, "bytec_1", opByteConst1, proto(":b"), 1, detDefault()},
+ {0x2a, "bytec_2", opByteConst2, proto(":b"), 1, detDefault()},
+ {0x2b, "bytec_3", opByteConst3, proto(":b"), 1, detDefault()},
{0x2c, "arg", opArg, proto(":b"), 1, immediates("n").only(modeSig).assembler(asmArg)},
{0x2d, "arg_0", opArg0, proto(":b"), 1, only(modeSig)},
{0x2e, "arg_1", opArg1, proto(":b"), 1, only(modeSig)},
@@ -464,12 +474,12 @@ var OpSpecs = []OpSpec{
{0x3e, "loads", opLoads, proto("i:a"), 5, stacky(typeLoads)},
{0x3f, "stores", opStores, proto("ia:"), 5, stacky(typeStores)},
- {0x40, "bnz", opBnz, proto("i:"), 1, opBranch()},
- {0x41, "bz", opBz, proto("i:"), 2, opBranch()},
- {0x42, "b", opB, proto(":"), 2, opBranch()},
- {0x43, "return", opReturn, proto("i:x"), 2, opDefault()},
- {0x44, "assert", opAssert, proto("i:"), 3, opDefault()},
- {0x48, "pop", opPop, proto("a:"), 1, opDefault()},
+ {0x40, "bnz", opBnz, proto("i:"), 1, detBranch()},
+ {0x41, "bz", opBz, proto("i:"), 2, detBranch()},
+ {0x42, "b", opB, proto(":"), 2, detBranch()},
+ {0x43, "return", opReturn, proto("i:x"), 2, detDefault()},
+ {0x44, "assert", opAssert, proto("i:"), 3, detDefault()},
+ {0x48, "pop", opPop, proto("a:"), 1, detDefault()},
{0x49, "dup", opDup, proto("a:aa", "A, A"), 1, stacky(typeDup)},
{0x4a, "dup2", opDup2, proto("aa:aaaa", "A, B, A, B"), 2, stacky(typeDupTwo)},
// There must be at least one thing on the stack for dig, but
@@ -481,20 +491,20 @@ var OpSpecs = []OpSpec{
{0x4f, "uncover", opUncover, proto("a:a", "A, [N items]", "[N items], A"), 5, stacky(typeUncover, "n")},
// byteslice processing / StringOps
- {0x50, "concat", opConcat, proto("bb:b"), 2, opDefault()},
+ {0x50, "concat", opConcat, proto("bb:b"), 2, detDefault()},
{0x51, "substring", opSubstring, proto("b:b"), 2, immediates("s", "e").assembler(asmSubstring)},
- {0x52, "substring3", opSubstring3, proto("bii:b"), 2, opDefault()},
- {0x53, "getbit", opGetBit, proto("ai:i"), 3, opDefault()},
+ {0x52, "substring3", opSubstring3, proto("bii:b"), 2, detDefault()},
+ {0x53, "getbit", opGetBit, proto("ai:i"), 3, detDefault()},
{0x54, "setbit", opSetBit, proto("aii:a"), 3, stacky(typeSetBit)},
- {0x55, "getbyte", opGetByte, proto("bi:i"), 3, opDefault()},
- {0x56, "setbyte", opSetByte, proto("bii:b"), 3, opDefault()},
+ {0x55, "getbyte", opGetByte, proto("bi:i"), 3, detDefault()},
+ {0x56, "setbyte", opSetByte, proto("bii:b"), 3, detDefault()},
{0x57, "extract", opExtract, proto("b:b"), 5, immediates("s", "l")},
- {0x58, "extract3", opExtract3, proto("bii:b"), 5, opDefault()},
- {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()},
- {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()},
- {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()},
+ {0x58, "extract3", opExtract3, proto("bii:b"), 5, detDefault()},
+ {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, detDefault()},
+ {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, detDefault()},
+ {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, detDefault()},
{0x5c, "replace2", opReplace2, proto("bb:b"), 7, immediates("s")},
- {0x5d, "replace3", opReplace3, proto("bib:b"), 7, opDefault()},
+ {0x5d, "replace3", opReplace3, proto("bib:b"), 7, detDefault()},
{0x5e, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16, 0)},
{0x5f, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes).costByLength(25, 2, 7, 1)},
@@ -532,19 +542,20 @@ var OpSpecs = []OpSpec{
{0x84, "ed25519verify_bare", opEd25519VerifyBare, proto("bbb:i"), 7, costly(1900)},
// "Function oriented"
- {0x88, "callsub", opCallSub, proto(":"), 4, opBranch()},
- {0x89, "retsub", opRetSub, proto(":"), 4, opDefault()},
- // Leave a little room for indirect function calls, or similar
+ {0x88, "callsub", opCallSub, proto(":"), 4, detBranch()},
+ {0x89, "retsub", opRetSub, proto(":"), 4, detDefault()},
+ {0x8a, "switch", opSwitch, proto("i:"), 8, detSwitch()},
+ // 0x8b will likely be a switch on pairs of values/targets
// More math
- {0x90, "shl", opShiftLeft, proto("ii:i"), 4, opDefault()},
- {0x91, "shr", opShiftRight, proto("ii:i"), 4, opDefault()},
+ {0x90, "shl", opShiftLeft, proto("ii:i"), 4, detDefault()},
+ {0x91, "shr", opShiftRight, proto("ii:i"), 4, detDefault()},
{0x92, "sqrt", opSqrt, proto("i:i"), 4, costly(4)},
- {0x93, "bitlen", opBitLen, proto("a:i"), 4, opDefault()},
- {0x94, "exp", opExp, proto("ii:i"), 4, opDefault()},
+ {0x93, "bitlen", opBitLen, proto("a:i"), 4, detDefault()},
+ {0x94, "exp", opExp, proto("ii:i"), 4, detDefault()},
{0x95, "expw", opExpw, proto("ii:ii"), 4, costly(10)},
{0x96, "bsqrt", opBytesSqrt, proto("b:b"), 6, costly(40)},
- {0x97, "divw", opDivw, proto("iii:i"), 6, opDefault()},
+ {0x97, "divw", opDivw, proto("iii:i"), 6, detDefault()},
{0x98, "sha3_256", opSHA3_256, proto("b:b"), 7, costly(130)},
/* Will end up following keccak256 -
{0x98, "sha3_256", opSHA3_256, proto("b:b"), unlimitedStorage, costByLength(58, 4, 8)},},
@@ -553,25 +564,24 @@ var OpSpecs = []OpSpec{
{0x99, "bn256_add", opBn256Add, proto("bb:b"), pairingVersion, costly(70)},
{0x9a, "bn256_scalar_mul", opBn256ScalarMul, proto("bb:b"), pairingVersion, costly(970)},
{0x9b, "bn256_pairing", opBn256Pairing, proto("bb:i"), pairingVersion, costly(8700)},
- // leave room here for eip-2537 style opcodes
// Byteslice math.
{0xa0, "b+", opBytesPlus, proto("bb:b"), 4, costly(10)},
{0xa1, "b-", opBytesMinus, proto("bb:b"), 4, costly(10)},
{0xa2, "b/", opBytesDiv, proto("bb:b"), 4, costly(20)},
{0xa3, "b*", opBytesMul, proto("bb:b"), 4, costly(20)},
- {0xa4, "b<", opBytesLt, proto("bb:i"), 4, opDefault()},
- {0xa5, "b>", opBytesGt, proto("bb:i"), 4, opDefault()},
- {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, opDefault()},
- {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, opDefault()},
- {0xa8, "b==", opBytesEq, proto("bb:i"), 4, opDefault()},
- {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, opDefault()},
+ {0xa4, "b<", opBytesLt, proto("bb:i"), 4, detDefault()},
+ {0xa5, "b>", opBytesGt, proto("bb:i"), 4, detDefault()},
+ {0xa6, "b<=", opBytesLe, proto("bb:i"), 4, detDefault()},
+ {0xa7, "b>=", opBytesGe, proto("bb:i"), 4, detDefault()},
+ {0xa8, "b==", opBytesEq, proto("bb:i"), 4, detDefault()},
+ {0xa9, "b!=", opBytesNeq, proto("bb:i"), 4, detDefault()},
{0xaa, "b%", opBytesModulo, proto("bb:b"), 4, costly(20)},
{0xab, "b|", opBytesBitOr, proto("bb:b"), 4, costly(6)},
{0xac, "b&", opBytesBitAnd, proto("bb:b"), 4, costly(6)},
{0xad, "b^", opBytesBitXor, proto("bb:b"), 4, costly(6)},
{0xae, "b~", opBytesBitNot, proto("b:b"), 4, costly(4)},
- {0xaf, "bzero", opBytesZero, proto("i:b"), 4, opDefault()},
+ {0xaf, "bzero", opBytesZero, proto("i:b"), 4, detDefault()},
// AVM "effects"
{0xb0, "log", opLog, proto("b:"), 5, only(modeApp)},
diff --git a/data/transactions/logic/teal.tmLanguage.json b/data/transactions/logic/teal.tmLanguage.json
index 127f129619..863d0a0c43 100644
--- a/data/transactions/logic/teal.tmLanguage.json
+++ b/data/transactions/logic/teal.tmLanguage.json
@@ -64,7 +64,7 @@
},
{
"name": "keyword.control.teal",
- "match": "^(assert|b|bnz|bz|callsub|cover|dig|dup|dup2|err|pop|retsub|return|select|swap|uncover)\\b"
+ "match": "^(assert|b|bnz|bz|callsub|cover|dig|dup|dup2|err|pop|retsub|return|select|swap|switch|uncover)\\b"
},
{
"name": "keyword.other.teal",
diff --git a/data/transactions/verify/txn.go b/data/transactions/verify/txn.go
index 1d947d31ae..885d0882ec 100644
--- a/data/transactions/verify/txn.go
+++ b/data/transactions/verify/txn.go
@@ -112,10 +112,7 @@ func Txn(s *transactions.SignedTxn, txnIdx int, groupCtx *GroupContext) error {
if batchVerifier.GetNumberOfEnqueuedSignatures() == 0 {
return nil
}
- if err := batchVerifier.Verify(); err != nil {
- return err
- }
- return nil
+ return batchVerifier.Verify()
}
// TxnBatchVerify verifies a SignedTxn having no obviously inconsistent data.
@@ -258,10 +255,7 @@ func LogicSigSanityCheck(txn *transactions.SignedTxn, groupIndex int, groupCtx *
return nil
}
- if err := batchVerifier.Verify(); err != nil {
- return err
- }
- return nil
+ return batchVerifier.Verify()
}
// LogicSigSanityCheckBatchVerify checks that the signature is valid and that the program is basically well formed.
diff --git a/data/transactions/verify/verifiedTxnCache.go b/data/transactions/verify/verifiedTxnCache.go
index b41993be94..82f0e9772e 100644
--- a/data/transactions/verify/verifiedTxnCache.go
+++ b/data/transactions/verify/verifiedTxnCache.go
@@ -60,8 +60,8 @@ type VerifiedTransactionCache interface {
Add(txgroup []transactions.SignedTxn, groupCtx *GroupContext)
// AddPayset works in a similar way to Add, but is intended for adding an array of transaction groups, along with their corresponding contexts.
AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*GroupContext) error
- // GetUnverifiedTranscationGroups compares the provided payset against the currently cached transactions and figure which transaction groups aren't fully cached.
- GetUnverifiedTranscationGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn
+ // GetUnverifiedTransactionGroups compares the provided payset against the currently cached transactions and figures out which transaction groups aren't fully cached.
+ GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn
// UpdatePinned replaces the pinned entries with the ones provided in the pinnedTxns map. This is typically expected to be a subset of the
// already-pinned transactions. If a transaction is not currently pinned and can't be found in the cache, an errMissingPinnedEntry error would be generated.
UpdatePinned(pinnedTxns map[transactions.Txid]transactions.SignedTxn) error
@@ -115,8 +115,8 @@ func (v *verifiedTransactionCache) AddPayset(txgroup [][]transactions.SignedTxn,
return nil
}
-// GetUnverifiedTranscationGroups compares the provided payset against the currently cached transactions and figure which transaction groups aren't fully cached.
-func (v *verifiedTransactionCache) GetUnverifiedTranscationGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
+// GetUnverifiedTransactionGroups compares the provided payset against the currently cached transactions and figures out which transaction groups aren't fully cached.
+func (v *verifiedTransactionCache) GetUnverifiedTransactionGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
v.bucketsLock.Lock()
defer v.bucketsLock.Unlock()
groupCtx := &GroupContext{
@@ -272,7 +272,7 @@ func (v *mockedCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []
return nil
}
-func (v *mockedCache) GetUnverifiedTranscationGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
+func (v *mockedCache) GetUnverifiedTransactionGroups(txnGroups [][]transactions.SignedTxn, currSpecAddrs transactions.SpecialAddresses, currProto protocol.ConsensusVersion) (unverifiedGroups [][]transactions.SignedTxn) {
if v.alwaysVerified {
return nil
}
diff --git a/data/transactions/verify/verifiedTxnCache_test.go b/data/transactions/verify/verifiedTxnCache_test.go
index 35d958e354..e3001db674 100644
--- a/data/transactions/verify/verifiedTxnCache_test.go
+++ b/data/transactions/verify/verifiedTxnCache_test.go
@@ -76,7 +76,7 @@ func TestBucketCycling(t *testing.T) {
require.Equal(t, 1, len(impl.buckets[0]))
}
-func TestGetUnverifiedTranscationGroups50(t *testing.T) {
+func TestGetUnverifiedTransactionGroups50(t *testing.T) {
partitiontest.PartitionTest(t)
size := 300
@@ -97,11 +97,11 @@ func TestGetUnverifiedTranscationGroups50(t *testing.T) {
}
}
- unverifiedGroups := impl.GetUnverifiedTranscationGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
+ unverifiedGroups := impl.GetUnverifiedTransactionGroups(txnGroups, spec, protocol.ConsensusCurrentVersion)
require.Equal(t, len(expectedUnverifiedGroups), len(unverifiedGroups))
}
-func BenchmarkGetUnverifiedTranscationGroups50(b *testing.B) {
+func BenchmarkGetUnverifiedTransactionGroups50(b *testing.B) {
if b.N < 20000 {
b.N = 20000
}
@@ -125,7 +125,7 @@ func BenchmarkGetUnverifiedTranscationGroups50(b *testing.B) {
startTime := time.Now()
measuringMultipler := 1000
for i := 0; i < measuringMultipler; i++ {
- impl.GetUnverifiedTranscationGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion)
+ impl.GetUnverifiedTransactionGroups(queryTxnGroups, spec, protocol.ConsensusCurrentVersion)
}
duration := time.Now().Sub(startTime)
// calculate time per 10K verified entries:
diff --git a/data/txHandler.go b/data/txHandler.go
index 46248b4edb..cd4c25c8e0 100644
--- a/data/txHandler.go
+++ b/data/txHandler.go
@@ -219,7 +219,7 @@ func (handler *TxHandler) asyncVerifySignature(arg interface{}) interface{} {
}
func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) network.OutgoingMessage {
- dec := protocol.NewDecoderBytes(rawmsg.Data)
+ dec := protocol.NewMsgpDecoderBytes(rawmsg.Data)
ntx := 0
unverifiedTxGroup := make([]transactions.SignedTxn, 1)
for {
@@ -265,11 +265,13 @@ func (handler *TxHandler) processIncomingTxn(rawmsg network.IncomingMessage) net
// Note that this also checks the consistency of the transaction's group hash,
// which is required for safe transaction signature caching behavior.
func (handler *TxHandler) checkAlreadyCommitted(tx *txBacklogMsg) (processingDone bool) {
- txids := make([]transactions.Txid, len(tx.unverifiedTxGroup))
- for i := range tx.unverifiedTxGroup {
- txids[i] = tx.unverifiedTxGroup[i].ID()
+ if logging.Base().IsLevelEnabled(logging.Debug) {
+ txids := make([]transactions.Txid, len(tx.unverifiedTxGroup))
+ for i := range tx.unverifiedTxGroup {
+ txids[i] = tx.unverifiedTxGroup[i].ID()
+ }
+ logging.Base().Debugf("got a tx group with IDs %v", txids)
}
- logging.Base().Debugf("got a tx group with IDs %v", txids)
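The guard is worth a note: each ID() call hashes the encoded transaction, so the old code paid a slice allocation plus one hash per group member on every incoming message even when debug logging was off; now the hot path pays only a level check. A minimal sketch of the same pattern (log and group are hypothetical stand-ins):

// Construct expensive log arguments only when they will be emitted.
if log.IsLevelEnabled(logging.Debug) {
	ids := make([]transactions.Txid, len(group))
	for i := range group {
		ids[i] = group[i].ID() // hashing happens only on the debug path
	}
	log.Debugf("got a tx group with IDs %v", ids)
}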
// do a quick test to check that this transaction could potentially be committed, to reject dup pending transactions
err := handler.txPool.Test(tx.unverifiedTxGroup)
diff --git a/data/txHandler_test.go b/data/txHandler_test.go
index 653cd51e9e..14a5495eb2 100644
--- a/data/txHandler_test.go
+++ b/data/txHandler_test.go
@@ -18,6 +18,7 @@ package data
import (
"fmt"
+ "io"
"math/rand"
"testing"
"time"
@@ -31,17 +32,18 @@ import (
"github.com/algorand/go-algorand/data/bookkeeping"
"github.com/algorand/go-algorand/data/pools"
"github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/verify"
"github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/network"
"github.com/algorand/go-algorand/protocol"
+ "github.com/algorand/go-algorand/test/partitiontest"
"github.com/algorand/go-algorand/util/execpool"
)
-func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
- b.StopTimer()
- b.ResetTimer()
- const numRounds = 10
+func BenchmarkTxHandlerProcessing(b *testing.B) {
const numUsers = 100
log := logging.TestingLog(b)
+ log.SetLevel(logging.Warn)
secrets := make([]*crypto.SignatureSecrets, numUsers)
addresses := make([]basics.Address, numUsers)
@@ -73,17 +75,20 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
l := ledger
- cfg.TxPoolSize = 20000
+ cfg.TxPoolSize = 75000
cfg.EnableProcessBlockStats = false
tp := pools.MakeTransactionPool(l.Ledger, cfg, logging.Base())
- signedTransactions := make([]transactions.SignedTxn, 0, b.N)
- for i := 0; i < b.N/numUsers; i++ {
- for u := 0; u < numUsers; u++ {
+ backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
+ txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
+
+ makeTxns := func(N int) [][]transactions.SignedTxn {
+ ret := make([][]transactions.SignedTxn, 0, N)
+ for u := 0; u < N; u++ {
// generate transactions
tx := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
- Sender: addresses[u],
+ Sender: addresses[u%numUsers],
Fee: basics.MicroAlgos{Raw: proto.MinTxnFee * 2},
FirstValid: 0,
LastValid: basics.Round(proto.MaxTxnLife),
@@ -94,18 +99,51 @@ func BenchmarkTxHandlerProcessDecoded(b *testing.B) {
Amount: basics.MicroAlgos{Raw: mockBalancesMinBalance + (rand.Uint64() % 10000)},
},
}
- signedTx := tx.Sign(secrets[u])
- signedTransactions = append(signedTransactions, signedTx)
+ signedTx := tx.Sign(secrets[u%numUsers])
+ ret = append(ret, []transactions.SignedTxn{signedTx})
}
+ return ret
}
- backlogPool := execpool.MakeBacklog(nil, 0, execpool.LowPriority, nil)
- txHandler := MakeTxHandler(tp, l, &mocks.MockNetwork{}, "", crypto.Digest{}, backlogPool)
- b.StartTimer()
- for _, signedTxn := range signedTransactions {
- txHandler.processDecoded([]transactions.SignedTxn{signedTxn})
- }
+
+ b.Run("processDecoded", func(b *testing.B) {
+ signedTransactionGroups := makeTxns(b.N)
+ b.ResetTimer()
+ for i := range signedTransactionGroups {
+ txHandler.processDecoded(signedTransactionGroups[i])
+ }
+ })
+ b.Run("verify.TxnGroup", func(b *testing.B) {
+ signedTransactionGroups := makeTxns(b.N)
+ b.ResetTimer()
+ // make a header including only the fields needed by PrepareGroupContext
+ hdr := bookkeeping.BlockHeader{}
+ hdr.FeeSink = basics.Address{}
+ hdr.RewardsPool = basics.Address{}
+ hdr.CurrentProtocol = protocol.ConsensusCurrentVersion
+ vtc := vtCache{}
+ b.Logf("verifying %d signedTransactionGroups", len(signedTransactionGroups))
+ b.ResetTimer()
+ for i := range signedTransactionGroups {
+ verify.TxnGroup(signedTransactionGroups[i], hdr, vtc, l)
+ }
+ })
}
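The rewrite folds the old single benchmark into b.Run sub-benchmarks so the expensive setup (ledger, pool, handler) is paid once while both code paths are measured against it. A stripped-down sketch of that shape, with a trivial workload standing in for the real handler:

```go
package main

import "testing"

func BenchmarkShared(b *testing.B) {
	data := make([]int, 1<<20) // shared setup, built once

	b.Run("sum", func(b *testing.B) {
		b.ResetTimer() // keep any per-subtest prep out of the measurement
		for i := 0; i < b.N; i++ {
			total := 0
			for _, v := range data {
				total += v
			}
			_ = total
		}
	})
	b.Run("len", func(b *testing.B) {
		for i := 0; i < b.N; i++ {
			_ = len(data)
		}
	})
}
```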
+// vtCache is a noop VerifiedTransactionCache
+type vtCache struct{}
+
+func (vtCache) Add(txgroup []transactions.SignedTxn, groupCtx *verify.GroupContext) {}
+func (vtCache) AddPayset(txgroup [][]transactions.SignedTxn, groupCtxs []*verify.GroupContext) error {
+ return nil
+}
+func (vtCache) GetUnverifiedTransactionGroups(payset [][]transactions.SignedTxn, CurrSpecAddrs transactions.SpecialAddresses, CurrProto protocol.ConsensusVersion) [][]transactions.SignedTxn {
+ return nil
+}
+func (vtCache) UpdatePinned(pinnedTxns map[transactions.Txid]transactions.SignedTxn) error {
+ return nil
+}
+func (vtCache) Pin(txgroup []transactions.SignedTxn) error { return nil }
+
func BenchmarkTimeAfter(b *testing.B) {
b.StopTimer()
b.ResetTimer()
@@ -121,3 +159,92 @@ func BenchmarkTimeAfter(b *testing.B) {
}
}
}
+
+func makeRandomTransactions(num int) ([]transactions.SignedTxn, []byte) {
+ stxns := make([]transactions.SignedTxn, num)
+ result := make([]byte, 0, num*200)
+ for i := 0; i < num; i++ {
+ var sig crypto.Signature
+ crypto.RandBytes(sig[:])
+ var addr basics.Address
+ crypto.RandBytes(addr[:])
+ stxns[i] = transactions.SignedTxn{
+ Sig: sig,
+ AuthAddr: addr,
+ Txn: transactions.Transaction{
+ Header: transactions.Header{
+ Sender: addr,
+ Fee: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ Note: sig[:],
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: addr,
+ Amount: basics.MicroAlgos{Raw: crypto.RandUint64()},
+ },
+ },
+ }
+
+ d2 := protocol.Encode(&stxns[i])
+ result = append(result, d2...)
+ }
+ return stxns, result
+}
+
+func TestTxHandlerProcessIncomingTxn(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ const numTxns = 11
+ handler := TxHandler{
+ backlogQueue: make(chan *txBacklogMsg, 1),
+ }
+ stxns, blob := makeRandomTransactions(numTxns)
+ action := handler.processIncomingTxn(network.IncomingMessage{Data: blob})
+ require.Equal(t, network.OutgoingMessage{Action: network.Ignore}, action)
+
+ require.Equal(t, 1, len(handler.backlogQueue))
+ msg := <-handler.backlogQueue
+ require.Equal(t, numTxns, len(msg.unverifiedTxGroup))
+ for i := 0; i < numTxns; i++ {
+ require.Equal(t, stxns[i], msg.unverifiedTxGroup[i])
+ }
+}
+
+const benchTxnNum = 25_000
+
+func BenchmarkTxHandlerDecoder(b *testing.B) {
+ _, blob := makeRandomTransactions(benchTxnNum)
+ var err error
+ stxns := make([]transactions.SignedTxn, benchTxnNum+1)
+ for i := 0; i < b.N; i++ {
+ dec := protocol.NewDecoderBytes(blob)
+ var idx int
+ for {
+ err = dec.Decode(&stxns[idx])
+ if err == io.EOF {
+ break
+ }
+ require.NoError(b, err)
+ idx++
+ }
+ require.Equal(b, benchTxnNum, idx)
+ }
+}
+
+func BenchmarkTxHandlerDecoderMsgp(b *testing.B) {
+ _, blob := makeRandomTransactions(benchTxnNum)
+ var err error
+ stxns := make([]transactions.SignedTxn, benchTxnNum+1)
+ for i := 0; i < b.N; i++ {
+ dec := protocol.NewMsgpDecoderBytes(blob)
+ var idx int
+ for {
+ err = dec.Decode(&stxns[idx])
+ if err == io.EOF {
+ break
+ }
+ require.NoError(b, err)
+ idx++
+ }
+ require.Equal(b, benchTxnNum, idx)
+ }
+}
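Both benchmarks share one loop shape: decode records until io.EOF, treating EOF as the normal end of the stream and anything else as a failure. The same pattern in isolation, using encoding/json's stream decoder since protocol's decoders aren't needed to show it:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

// decodeAll drains dec, treating io.EOF as the clean end of input.
func decodeAll(dec *json.Decoder) ([]int, error) {
	var out []int
	for {
		var v int
		if err := dec.Decode(&v); err != nil {
			if err == io.EOF {
				return out, nil // stream exhausted: not an error
			}
			return out, err // genuine decode failure
		}
		out = append(out, v)
	}
}

func main() {
	vals, err := decodeAll(json.NewDecoder(strings.NewReader("1 2 3")))
	fmt.Println(vals, err) // [1 2 3] <nil>
}
```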
diff --git a/gen/generate.go b/gen/generate.go
index 804e893c23..15eb091032 100644
--- a/gen/generate.go
+++ b/gen/generate.go
@@ -19,7 +19,6 @@ package gen
import (
"fmt"
"io"
- "io/ioutil"
"math"
"os"
"path/filepath"
@@ -374,7 +373,7 @@ func generateGenesisFiles(outDir string, protoVersion protocol.ConsensusVersion,
}
jsonData := protocol.EncodeJSON(g)
- err = ioutil.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
+ err = os.WriteFile(filepath.Join(outDir, config.GenesisJSONFile), append(jsonData, '\n'), 0666)
if (verbose) && (rootKeyCreated > 0 || partKeyCreated > 0) {
fmt.Printf("Created %d new rootkeys and %d new partkeys in %s.\n", rootKeyCreated, partKeyCreated, time.Since(createStart))
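This hunk is part of the repo-wide migration off io/ioutil, whose helpers were deprecated in Go 1.16 in favor of identical functions in os. The replacement is mechanical:

```go
package main

import "os"

func main() {
	// os.WriteFile / os.ReadFile replace ioutil.WriteFile / ioutil.ReadFile
	// with the same signatures and permission semantics (Go 1.16+).
	if err := os.WriteFile("genesis.json", []byte("{}\n"), 0666); err != nil {
		panic(err)
	}
	data, err := os.ReadFile("genesis.json")
	if err != nil {
		panic(err)
	}
	_ = data
}
```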
diff --git a/go.mod b/go.mod
index dd46477e86..f20b25dc6d 100644
--- a/go.mod
+++ b/go.mod
@@ -3,6 +3,7 @@ module github.com/algorand/go-algorand
go 1.17
require (
+ github.com/algorand/avm-abi v0.1.0
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414
github.com/algorand/go-codec/codec v1.1.8
github.com/algorand/go-deadlock v0.2.2
@@ -12,7 +13,6 @@ require (
github.com/algorand/oapi-codegen v1.3.7
github.com/algorand/websocket v1.4.5
github.com/aws/aws-sdk-go v1.16.5
- github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e
github.com/consensys/gnark-crypto v0.7.0
github.com/davidlazar/go-crypto v0.0.0-20170701192655-dcfb0a7ac018
github.com/dchest/siphash v1.2.1
@@ -39,6 +39,7 @@ require (
)
require (
+ github.com/chrismcguire/gobberish v0.0.0-20150821175641-1d8adb509a0e // indirect
github.com/cpuguy83/go-md2man v1.0.8 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect
diff --git a/go.sum b/go.sum
index 0537d11015..58fbbdb98c 100644
--- a/go.sum
+++ b/go.sum
@@ -1,3 +1,5 @@
+github.com/algorand/avm-abi v0.1.0 h1:znZFQXpSUVYz37vXbaH5OZG2VK4snTyXwnc/tV9CVr4=
+github.com/algorand/avm-abi v0.1.0/go.mod h1:+CgwM46dithy850bpTeHh9MC99zpn2Snirb3QTl2O/g=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414 h1:nwYN+GQ7Z5OOfZwqBO1ma7DSlP7S1YrKWICOyjkwqrc=
github.com/algorand/falcon v0.0.0-20220727072124-02a2a64c4414/go.mod h1:OkQyHlGvS0kLNcIWbC21/uQcnbfwSOQm+wiqWwBG9pQ=
github.com/algorand/go-codec v1.1.8/go.mod h1:XhzVs6VVyWMLu6cApb9/192gBjGRVGm5cX5j203Heg4=
diff --git a/installer/genesis/alphanet/genesis.json b/installer/genesis/alphanet/genesis.json
new file mode 100644
index 0000000000..b3944e7af2
--- /dev/null
+++ b/installer/genesis/alphanet/genesis.json
@@ -0,0 +1,313 @@
+{
+ "alloc": [
+ {
+ "addr": "7777777777777777777777777777777777777777777777777774MSJUVU",
+ "comment": "RewardsPool",
+ "state": {
+ "algo": 125000000000000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "comment": "FeeSink",
+ "state": {
+ "algo": 100000,
+ "onl": 2
+ }
+ },
+ {
+ "addr": "NXD653KPZRLYFZKUWNYVZUDUBMB5NWGRZYSSMNOAR2GKNR4WE4D6JJ6SDQ",
+ "comment": "Wallet1",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "3s5/mxcllqsE0KabL4rzHC9bxLp3BKHLHUfHrl2aYRI=",
+ "stprf": "9yV+6Z2KoQuHJWhDqNZ/ULQtcatXQXVtk2Ei/nBB0aH+3p4NcMj8ONbJNi88sqrsCHR1wArBYnVtwSk+Qwq/6Q==",
+ "vote": "F3ZUaQ+NHy0+Oi39s/ah4riH10kVh9wqdo2E8Vq1Q/s=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "GMD5VUK6MOF5TNKJB7MGB5TRWZLFDG435LGZFCB4GK7TB75EDT3XEJKDSI",
+ "comment": "Wallet10",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "aSAlZpmcmnp/ITcKu+tJqIppOaXy1SrvtzIbLW5ZVxQ=",
+ "stprf": "D9rUr2pfk2rcE2h1BZgyHvzsiHC2Lco6fxTgulZd66A1t2+IY7TiyE+cW/yzyASrcFM1ku6HbDpM+dsn81BTFw==",
+ "vote": "FlVJov8Pt7nkVuaV0g8MWW0KoWX6QnLilA35wrXWXn4=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "QB5WTO545MRUDHPP3H4EM2H7TJDWEB3CBDDZGAVIGIXR56S5EIZDPYLU44",
+ "comment": "Wallet11",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "hNlU23ZwJ0uIasSwuy7urtPVbHVmwUyUSBYumIqxuKY=",
+ "stprf": "3eJyN0gNwYluwBPZQDjjPj+lsSsuyJlRMKm1yDeNfw/lMyNdkUJU59aJdkMuye3qd5Av6wjnxhGiCXq42WUo4Q==",
+ "vote": "wmXN99MQgOuMgeShyJ4NcL6jKQbouKrdUBgYUhJLDfQ=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "FYT3QP5FN27QM53TZD77W4LL6SNS4ULEAIQF6AYPCFEJIUSGAYIXMWCEI4",
+ "comment": "Wallet12",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "FVVkzU8fEt+LhT6Zk1ITNSIvEcY6uVXzKWQbU+WIp+k=",
+ "stprf": "GZ903Jk56IN2uG+OjKl8jfRuuEKReyzuyU23MZxLvDjgrcHfIVhs/z3pgzOrrnajV4jX5PSasI5L6Vz8iuOLfQ==",
+ "vote": "lVPjjTJMZpvLvrhNj8U4D6Emc2vii27ZaeClBk5emIM=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "UHJSEAVFDIOLLT6UYIFRLTEV7CTQPYJEQDAIHRVYAPPSX6UJIMVZR4P7AA",
+ "comment": "Wallet13",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "0P5nHFzLSU+PEcM5r58V71E6Mf4IR+iYZXHbldH60MU=",
+ "stprf": "TEyFNqFjWUmxmjMTDatOP8Aua8P1m6qIRYqwDJsCUOAvUSsekq+2MxZ4MT+Pfjl69GH6SR9siLHvPd9vgL1mlQ==",
+ "vote": "g8+LvQ+DgbIA1aDbUkkNfYKcwFWTO/UK6ljxeBYcp5M=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "6JLQSZQMVQUWPOYLEAOAOHDMC2PI3ZXPT5ADUE6YMBVL36EKQ2DHCP2WQI",
+ "comment": "Wallet14",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "5xOCDWQnV1wQccNxeXOPsvsxFu2rEXslM/JWtPKxeA4=",
+ "stprf": "zemmbbCDp5ekkJr6IrqrGewFrOHltjVBNBoCak4AmUOCqwtDdnCKbQq4NphemyEghfpfXKfYWhFQ44NYqCLTpQ==",
+ "vote": "gjDiA9kbvjXUahdEOjygyw86x1irFZbeyHCw/qHExwg=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "HHOHUW5U2BLQEWUUQLPXZMVZPCJQUJFPZCWHCBYOVHQYON2FWAARD6OMDM",
+ "comment": "Wallet15",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "h84u9vUCoCM85CLMfiCTPfNDO1bPqaafWlqC+PEmKQw=",
+ "stprf": "2MgJ5pgaDhGuo00re4oOsz+TQxCavOglg1Zmrir3K0USmj6grt1NYiJv+51xvt4BIkKG/Pk2E/wUk38jQD3CKA==",
+ "vote": "pbyNjS5u9THxb+y6lsH28T70EPGvBJ7Y89bzCdJAP7E=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "4U3ACMGUA7OBUE4BJR2U6CZQVJOBWQXEIKKG74CYOFHW7RYSLM6WUZWO2U",
+ "comment": "Wallet16",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LHWqqjw5rp/ar0/EjU0luGknE8+TYASmg8orMF+IAMY=",
+ "stprf": "k2kWdZ6Q3S3D/6mlI3QK7Z9kmCa4kbnJa985NLs5dYb5mkdsVueZVJ0kUnVdmsthCQVq5AYxeYEZiCCF4th2Ew==",
+ "vote": "1koKJypVgKG+8nt4k4dKbHpR64+aBeDi6frjXaOioro=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "W6MAVGKBYY3DFJRNOWOWKOYFWZJWYILPAFB4IENTOT63ZHRYXNTOUXSDCA",
+ "comment": "Wallet17",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LRfmRYLM91aDXt2llKpnYAihw3iVgk8G3377N/wHhKk=",
+ "stprf": "GZFyAPtAtAo+QE4nR7bZ87G3U6aZOTfFmyufc1kYOd1RP6bGMQUspQ8B8XKC3Y/9mhnyrVHNzhEqqYASqcyGmg==",
+ "vote": "TSgNl6SMI8/0kfD/YMpnUEGNqka660SUa0Fhw+ESh4A=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "SW6ZX2GHABLYFZ2GUFPNX65RAKJYSCSBPZMRDIE5ECKB5Q6NW34EMPWKYI",
+ "comment": "Wallet18",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "LAkKAzZwUqWv5IiLGwILXiuEjo79YmfrBSraUGxBRvw=",
+ "stprf": "xljekFxo4XYiwL+te0DMuXqQigvRVVqW8Y1MSrz+nqTlJ8v07hEkDu2ZPVAdeWb8cL5K4fkI/wYICFbqfzf8Xw==",
+ "vote": "XJ8smxZOlGTzt922nabeReh/CfZ3nPs58/72gXXahq4=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "EOGRZZV2MXCS5WG44XJ77S5YIX5JUWHJ6W6FDBOV5TSROTKMNJEGKNFR6U",
+ "comment": "Wallet19",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "OBgdemFmMNVkkfNNnSgxxaH0VGbLCpAs23qk+/i5pDk=",
+ "stprf": "VS3bpYvHZsV+i4E9Rck1ADqOFCe9mv+xEuB/4AbwL5reIs+XmgeSqeZYtjA44Yhu4nMMrsUq5KEfeJt+0zWw5Q==",
+ "vote": "Rcwn7JRB5X45mXj80+ra2C/1DB9uUS1W/E29+CBI1w8=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "UQCHKHGD7D5N6R5Q6VPQU6XNOPSULM53DGP27ISJINZIH3XKW2LHBYAKXU",
+ "comment": "Wallet2",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "loxF/eE88JkhmCvdMl+DmFsUvLeFuUaJNv1ecvnBDjU=",
+ "stprf": "faSsGeoQ5SWNDgD01rg+zZ3PZWqTMBIl69zyKdP0N/wSYdmO2zApuXEXj8ZEzzV3sG0d7/vdnlf1hTR8awofug==",
+ "vote": "FS57UeflDSSSrlRUlwabMBJRKHGef2Td3dSS148tc8w=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "GMXFVGAXXUEFXSREXM6JUQD4SVLXPSHG7YU5FDU4UEAY5WNANGAHJQOFGQ",
+ "comment": "Wallet20",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "DKoC+wEPaybLRdj+MKm3Qehy0br4i/UOaP+usLztyik=",
+ "stprf": "YqHEbJ6P1HhxLNai9oqBEWnthLcnBZjPuEoGxbM8QE/LWgAFWA8p8ZZ1UpW/l4dHZWq5BKYym3bZGpbtFMjuZg==",
+ "vote": "XtL1RzpArMLvV5yKlbfhKMR0NhNphJMUgkINSQ1yTXY=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "FO2JFV5TECTX5IPM23NHVQ2G7KM2CB463RBZPWYAIYPG2CFX2NHDZKTDHA",
+ "comment": "Wallet21",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "M5G7TXRP7LQYML7RWTJWYKVC63PSNLPT2Q4OML4L4INNYIHCOFMROUALDM",
+ "comment": "Wallet22",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "VNIGFIMNLXWU3HU3PTTTFTNXYUV7BDIR3AEH7F7T736XKBSEBWRIVXXYKU",
+ "comment": "Wallet23",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "GKMGMA4PNAYFXY3ZGN3XYZGT4H6BVMN23AZNN6OW77QZOKTTJMYF54FWP4",
+ "comment": "Wallet24",
+ "state": {
+ "algo": 1250000000000000
+ }
+ },
+ {
+ "addr": "3SDISKXXLMPWTBSABV2NJQ5MLIVKHGDXOGHMSJY4ZQ3AEBYLESF5AJYMNI",
+ "comment": "Wallet3",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "vfxc0A3ROMRug/YbwOFxbJGk+6Skh7rzG5r8CzQL7us=",
+ "stprf": "/bS8MveMPCzKd1Nwl4aFuAfGvwMwj/tLdbCVHBDV1mPUSgvqwgxTXWNZaRF3tX1ietC0DMxfJcb/51P9IblG8Q==",
+ "vote": "XG9EJKvg6GPfhQK+4A9qCYjOtCJrJvL4tBSy9pG7YWs=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "P22FLWO27IIZNALADADVFXM5SI4Y7EQPDDB4WQDJS7OTMD5RGE6VFQCJAE",
+ "comment": "Wallet4",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "jCXkeH6uE6YC3XQJLC53Y1rg0hERpgt8Y/HePQwnSRA=",
+ "stprf": "Pk2Nve+Ngpl8Y2ZoFE0yh1jMdKqKCX7QVcynsl1Kb4mo91S/xJ+HCEPV7KCOE4gapMhYEsfmtHxBwmArGcBkJA==",
+ "vote": "5B9VGMBD/U/AojL6cDv86DuqhhGGGp1ZMxuqDVE8lZA=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "WVJHEL3VPUI5RR4IBOG3VI6OFEJPELDQ7L2RMSZIUFTC3X5G4H5FI2NOGI",
+ "comment": "Wallet5",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "SRTJ0EUewq8ZQKMzygOm4b10HYchjg7xWTHGdKfZYig=",
+ "stprf": "rsvC8L2Ko+4LStTjvVEq88RnKNp8iHff67r3F7YqKTnk4UiwgsDblvsdQoT6BfMJ7f4N8x1ORgw6trGf1VHNjw==",
+ "vote": "lpXIAEYxDEB7hNdvdf+uCt7SktCFIOhejrmHL0mjg34=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "5TM5MD5FZBKNY3SKH7GYPIUAJ7PXIERXFLQ4O4FNL7OLUD6MHNJN4ZAB6Y",
+ "comment": "Wallet6",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "L9oncIYILrFOKQZTtYUT7VhVxs4oJSsalLQDEmKnfHw=",
+ "stprf": "2nBNjnteR7en0E6/9oSy1YiEAS0QDgI0kE2nhJPBo1Ehw//xgcu9GvTaZZBuIMho+56Uol/qCr+HZuOp5Z5bPw==",
+ "vote": "unBd7foxxmX6M3tFJiNX5nD9c30/MNvXkub7/0lUHAc=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "H3RSJSIQYCM5BIBDHHRSGAL2O2NVXV55F4HHTIDZKI2UZIEASLGH6B3STU",
+ "comment": "Wallet7",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "E1pGwlUFBmi1WvSV3NO3UbkbwotKFZlG1bR+gGBmPpo=",
+ "stprf": "sd0gkoXKFsqLqimLDP0DfWBOqc7h9gmHzZEB/o932f7X93KCZhvlDvgOSbgU03LjP8Bn+7H0CpuL/TXi7SoiCg==",
+ "vote": "G4N2xDms0d+MIjaTVubbNYXiK4Ef7kbQAC1praFOHGA=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "NJFY7INZLZVXAHAIEIR2VYTDXMBLAYKHQO3APX4RRTFN2WRUW6JWJ6L6RA",
+ "comment": "Wallet8",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "wGveX8ZZCA2YY4mvgGAkrGw4TuTki9aWf3bOAuLblhw=",
+ "stprf": "llVw21zZ7lsxQ99EPz3FVMUIfy3vkBemly60BZv9HURxjMvqNHC5XIm1slN339IHC54t1WXt5YMlzqGxTNEBjw==",
+ "vote": "mBNaxPrxkhReW3O+yYFdsBdhh642N2lbyQqAWFpkUsc=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ },
+ {
+ "addr": "AQ4PRMUOLU26M3XSGGT7UIYWICBV3DCB7FYKSKPAXYBSOFEDBHPFAEOPXE",
+ "comment": "Wallet9",
+ "state": {
+ "algo": 250000000000000,
+ "onl": 1,
+ "sel": "BI3DwXKQrnqxulSpUVGGtz4TBKh0RC/qbpSqDdZXFhc=",
+ "stprf": "0XeVxzG/voI2z0Imz79sN5CHI3U0P7ljlTpMLqODE5FaDCkB0s9vkACKiTQfNZCbfQl+20seL/7cyOOhF+OOVA==",
+ "vote": "5zFeVWFVN2huVvdsmYt0vhlFuggwpfY8QFGLcWbkGDg=",
+ "voteKD": 10000,
+ "voteLst": 3000000
+ }
+ }
+ ],
+ "fees": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "id": "v1",
+ "network": "alphanet",
+ "proto": "alpha1",
+ "rwd": "7777777777777777777777777777777777777777777777777774MSJUVU"
+}
diff --git a/installer/rpm/algorand/algorand.spec b/installer/rpm/algorand/algorand.spec
index c7cd519bb1..ef58c0db1a 100644
--- a/installer/rpm/algorand/algorand.spec
+++ b/installer/rpm/algorand/algorand.spec
@@ -59,7 +59,7 @@ install -m 644 ${REPO_DIR}/installer/rpm/algorand/algorand.repo %{buildroot}/usr
mkdir -p %{buildroot}/var/lib/algorand/genesis
if [ "%{RELEASE_GENESIS_PROCESS}" != "x" ]; then
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p %{buildroot}/var/lib/algorand/genesis/${dir}
cp ${REPO_DIR}/installer/genesis/${dir}/genesis.json %{buildroot}/var/lib/algorand/genesis/${dir}/genesis.json
@@ -89,6 +89,7 @@ fi
/var/lib/algorand/genesis/testnet/genesis.json
/var/lib/algorand/genesis/betanet/genesis.json
/var/lib/algorand/genesis/mainnet/genesis.json
+ /var/lib/algorand/genesis/alphanet/genesis.json
%endif
/lib/systemd/system/algorand.service
/lib/systemd/system/algorand@.service
diff --git a/ledger/accountdb.go b/ledger/accountdb.go
index 0542b9071b..f0b32a8d18 100644
--- a/ledger/accountdb.go
+++ b/ledger/accountdb.go
@@ -302,7 +302,7 @@ type compactAccountDeltas struct {
}
// onlineAccountDelta track all changes of account state within a range,
-// used in conjunction wih compactOnlineAccountDeltas to group and represent per-account changes.
+// used in conjunction with compactOnlineAccountDeltas to group and represent per-account changes.
// oldAcct represents the "old" state of the account in the DB, and is compared against newAcct[0]
// to determine if the acct became online or went offline.
type onlineAccountDelta struct {
@@ -967,13 +967,17 @@ func (a *compactOnlineAccountDeltas) updateOld(idx int, old persistedOnlineAccou
// writeCatchpointStagingBalances inserts all the account balances in the provided array into the catchpoint balance staging table catchpointbalances.
func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []normalizedAccountBalance) error {
+ selectAcctStmt, err := tx.PrepareContext(ctx, "SELECT rowid FROM catchpointbalances WHERE address = ?")
+ if err != nil {
+ return err
+ }
+
insertAcctStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointbalances(address, normalizedonlinebalance, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
- var insertRscStmt *sql.Stmt
- insertRscStmt, err = tx.PrepareContext(ctx, "INSERT INTO catchpointresources(addrid, aidx, data) VALUES(?, ?, ?)")
+ insertRscStmt, err := tx.PrepareContext(ctx, "INSERT INTO catchpointresources(addrid, aidx, data) VALUES(?, ?, ?)")
if err != nil {
return err
}
@@ -982,27 +986,41 @@ func writeCatchpointStagingBalances(ctx context.Context, tx *sql.Tx, bals []norm
var rowID int64
for _, balance := range bals {
result, err = insertAcctStmt.ExecContext(ctx, balance.address[:], balance.normalizedBalance, balance.encodedAccountData)
- if err != nil {
- return err
- }
- aff, err := result.RowsAffected()
- if err != nil {
- return err
- }
- if aff != 1 {
- return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
- }
- rowID, err = result.LastInsertId()
- if err != nil {
- return err
+ if err == nil {
+ var aff int64
+ aff, err = result.RowsAffected()
+ if err != nil {
+ return err
+ }
+ if aff != 1 {
+ return fmt.Errorf("number of affected record in insert was expected to be one, but was %d", aff)
+ }
+ rowID, err = result.LastInsertId()
+ if err != nil {
+ return err
+ }
+ } else {
+ var sqliteErr sqlite3.Error
+ if errors.As(err, &sqliteErr) && sqliteErr.Code == sqlite3.ErrConstraint && sqliteErr.ExtendedCode == sqlite3.ErrConstraintUnique {
+ // address exists: overflowed account record: find addrid
+ err = selectAcctStmt.QueryRowContext(ctx, balance.address[:]).Scan(&rowID)
+ if err != nil {
+ return err
+ }
+ } else {
+ return err
+ }
}
+
// write resources
for aidx := range balance.resources {
- result, err := insertRscStmt.ExecContext(ctx, rowID, aidx, balance.encodedResources[aidx])
+ var result sql.Result
+ result, err = insertRscStmt.ExecContext(ctx, rowID, aidx, balance.encodedResources[aidx])
if err != nil {
return err
}
- aff, err := result.RowsAffected()
+ var aff int64
+ aff, err = result.RowsAffected()
if err != nil {
return err
}
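The rewritten insert path falls back to a SELECT only when the INSERT failed specifically with a UNIQUE violation, i.e. the account row already exists from an earlier chunk of the same overflowed account. Isolating that error test makes the intent explicit; a sketch against mattn/go-sqlite3 (the helper name is ours, not the ledger's):

```go
package sqliteutil

import (
	"errors"

	sqlite3 "github.com/mattn/go-sqlite3"
)

// isUniqueViolation reports whether err is a UNIQUE constraint failure:
// the one condition the staging writer treats as "row already inserted".
func isUniqueViolation(err error) bool {
	var serr sqlite3.Error
	return errors.As(err, &serr) &&
		serr.Code == sqlite3.ErrConstraint &&
		serr.ExtendedCode == sqlite3.ErrConstraintUnique
}
```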
@@ -1593,7 +1611,7 @@ func (bo *baseOnlineAccountData) SetCoreAccountData(ad *ledgercore.AccountData)
type resourceFlags uint8
const (
- resourceFlagsHolding resourceFlags = 0 //nolint:deadcode,varcheck
+ resourceFlagsHolding resourceFlags = 0
resourceFlagsNotHolding resourceFlags = 1
resourceFlagsOwnership resourceFlags = 2
resourceFlagsEmptyAsset resourceFlags = 4
@@ -3965,16 +3983,26 @@ func (mc *MerkleCommitter) LoadPage(page uint64) (content []byte, err error) {
return content, nil
}
+// catchpointAccountResourceCounter keeps track of the resources processed for the current account
+type catchpointAccountResourceCounter struct {
+ totalAppParams uint64
+ totalAppLocalStates uint64
+ totalAssetParams uint64
+ totalAssets uint64
+}
+
// encodedAccountsBatchIter allows us to iterate over the accounts data stored in the accountbase table.
type encodedAccountsBatchIter struct {
- accountsRows *sql.Rows
- resourcesRows *sql.Rows
- nextRow pendingRow
+ accountsRows *sql.Rows
+ resourcesRows *sql.Rows
+ nextBaseRow pendingBaseRow
+ nextResourceRow pendingResourceRow
+ acctResCnt catchpointAccountResourceCounter
}
// Next returns an array containing the account data, in the same way it appears in the database,
// returning the data of up to accountCount accounts at a time.
-func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int) (bals []encodedBalanceRecordV6, err error) {
+func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx, accountCount int, resourceCount int) (bals []encodedBalanceRecordV6, numAccountsProcessed uint64, err error) {
if iterator.accountsRows == nil {
iterator.accountsRows, err = tx.QueryContext(ctx, "SELECT rowid, address, data FROM accountbase ORDER BY rowid")
if err != nil {
@@ -4000,9 +4028,11 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
return nil
}
- var totalAppParams, totalAppLocalStates, totalAssetParams, totalAssets uint64
+ var totalResources int
+
// emptyCount := 0
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error {
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error {
+
emptyBaseAcct := baseAcct.TotalAppParams == 0 && baseAcct.TotalAppLocalStates == 0 && baseAcct.TotalAssetParams == 0 && baseAcct.TotalAssets == 0
if !emptyBaseAcct && resData != nil {
if encodedRecord.Resources == nil {
@@ -4010,47 +4040,56 @@ func (iterator *encodedAccountsBatchIter) Next(ctx context.Context, tx *sql.Tx,
}
encodedRecord.Resources[uint64(cidx)] = encodedResourceData
if resData.IsApp() && resData.IsOwning() {
- totalAppParams++
+ iterator.acctResCnt.totalAppParams++
}
if resData.IsApp() && resData.IsHolding() {
- totalAppLocalStates++
+ iterator.acctResCnt.totalAppLocalStates++
}
if resData.IsAsset() && resData.IsOwning() {
- totalAssetParams++
+ iterator.acctResCnt.totalAssetParams++
}
if resData.IsAsset() && resData.IsHolding() {
- totalAssets++
+ iterator.acctResCnt.totalAssets++
}
-
+ totalResources++
}
- if baseAcct.TotalAppParams == totalAppParams &&
- baseAcct.TotalAppLocalStates == totalAppLocalStates &&
- baseAcct.TotalAssetParams == totalAssetParams &&
- baseAcct.TotalAssets == totalAssets {
+ if baseAcct.TotalAppParams == iterator.acctResCnt.totalAppParams &&
+ baseAcct.TotalAppLocalStates == iterator.acctResCnt.totalAppLocalStates &&
+ baseAcct.TotalAssetParams == iterator.acctResCnt.totalAssetParams &&
+ baseAcct.TotalAssets == iterator.acctResCnt.totalAssets {
+
+ encodedRecord.ExpectingMoreEntries = false
+ bals = append(bals, encodedRecord)
+ numAccountsProcessed++
+
+ iterator.acctResCnt = catchpointAccountResourceCounter{}
+
+ return nil
+ }
+ // max resources per chunk reached, stop iterating.
+ if lastResource {
+ encodedRecord.ExpectingMoreEntries = true
bals = append(bals, encodedRecord)
- totalAppParams = 0
- totalAppLocalStates = 0
- totalAssetParams = 0
- totalAssets = 0
+ encodedRecord.Resources = nil
}
return nil
}
- _, iterator.nextRow, err = processAllBaseAccountRecords(
+ _, iterator.nextBaseRow, iterator.nextResourceRow, err = processAllBaseAccountRecords(
iterator.accountsRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.nextRow, accountCount,
+ iterator.nextBaseRow, iterator.nextResourceRow, accountCount, resourceCount,
)
if err != nil {
iterator.Close()
return
}
- if len(bals) == accountCount {
+ if len(bals) == accountCount || totalResources == resourceCount {
// we're done with this iteration.
return
}
@@ -4106,27 +4145,37 @@ const (
// orderedAccountsIter allows us to iterate over the accounts addresses in the order of the account hashes.
type orderedAccountsIter struct {
- step orderedAccountsIterStep
- accountBaseRows *sql.Rows
- hashesRows *sql.Rows
- resourcesRows *sql.Rows
- tx *sql.Tx
- pendingRow pendingRow
- accountCount int
- insertStmt *sql.Stmt
+ step orderedAccountsIterStep
+ accountBaseRows *sql.Rows
+ hashesRows *sql.Rows
+ resourcesRows *sql.Rows
+ tx *sql.Tx
+ pendingBaseRow pendingBaseRow
+ pendingResourceRow pendingResourceRow
+ accountCount int
+ resourceCount int
+ insertStmt *sql.Stmt
}
// makeOrderedAccountsIter creates an ordered account iterator. Note that due to implementation reasons,
// only a single iterator can be active at a time.
-func makeOrderedAccountsIter(tx *sql.Tx, accountCount int) *orderedAccountsIter {
+func makeOrderedAccountsIter(tx *sql.Tx, accountCount int, resourceCount int) *orderedAccountsIter {
return &orderedAccountsIter{
- tx: tx,
- accountCount: accountCount,
- step: oaiStepStartup,
+ tx: tx,
+ accountCount: accountCount,
+ resourceCount: resourceCount,
+ step: oaiStepStartup,
}
}
-type pendingRow struct {
+type pendingBaseRow struct {
+ addr basics.Address
+ rowid int64
+ accountData *baseAccountData
+ encodedAccountData []byte
+}
+
+type pendingResourceRow struct {
addrid int64
aidx basics.CreatableIndex
buf []byte
@@ -4134,10 +4183,11 @@ type pendingRow struct {
func processAllResources(
resRows *sql.Rows,
- addr basics.Address, accountData *baseAccountData, acctRowid int64, pr pendingRow,
- callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error,
-) (pendingRow, error) {
+ addr basics.Address, accountData *baseAccountData, acctRowid int64, pr pendingResourceRow, resourceCount int,
+ callback func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error,
+) (pendingResourceRow, int, error) {
var err error
+ count := 0
// Declare variables outside of the loop to prevent allocations per iteration.
// At least resData is resolved as "escaped" because of passing it by a pointer to protocol.Decode()
@@ -4152,57 +4202,63 @@ func processAllResources(
// in this case addrid = 3 after processing resources from 1, but acctRowid = 2
// and we need to skip accounts without resources
if pr.addrid > acctRowid {
- err = callback(addr, 0, nil, nil)
- return pr, err
+ err = callback(addr, 0, nil, nil, false)
+ return pr, count, err
}
if pr.addrid < acctRowid {
err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", pr.addrid, acctRowid)
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
addrid = pr.addrid
buf = pr.buf
aidx = pr.aidx
- pr = pendingRow{}
+ pr = pendingResourceRow{}
} else {
if !resRows.Next() {
- err = callback(addr, 0, nil, nil)
+ err = callback(addr, 0, nil, nil, false)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
break
}
err = resRows.Scan(&addrid, &aidx, &buf)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
if addrid < acctRowid {
err = fmt.Errorf("resource table entries mismatches accountbase table entries : reached addrid %d while expecting resource for %d", addrid, acctRowid)
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
} else if addrid > acctRowid {
- err = callback(addr, 0, nil, nil)
- return pendingRow{addrid, aidx, buf}, err
+ err = callback(addr, 0, nil, nil, false)
+ return pendingResourceRow{addrid, aidx, buf}, count, err
}
}
resData = resourcesData{}
err = protocol.Decode(buf, &resData)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
- err = callback(addr, aidx, &resData, buf)
+ count++
+ if resourceCount > 0 && count == resourceCount {
+ // last resource to be included in chunk
+ err := callback(addr, aidx, &resData, buf, true)
+ return pendingResourceRow{}, count, err
+ }
+ err = callback(addr, aidx, &resData, buf, false)
if err != nil {
- return pendingRow{}, err
+ return pendingResourceRow{}, count, err
}
}
- return pendingRow{}, nil
+ return pendingResourceRow{}, count, nil
}
func processAllBaseAccountRecords(
baseRows *sql.Rows,
resRows *sql.Rows,
baseCb func(addr basics.Address, rowid int64, accountData *baseAccountData, encodedAccountData []byte) error,
- resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error,
- pending pendingRow, accountCount int,
-) (int, pendingRow, error) {
+ resCb func(addr basics.Address, creatableIdx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error,
+ pendingBase pendingBaseRow, pendingResource pendingResourceRow, accountCount int, resourceCount int,
+) (int, pendingBaseRow, pendingResourceRow, error) {
var addr basics.Address
var prevAddr basics.Address
var err error
@@ -4212,44 +4268,70 @@ func processAllBaseAccountRecords(
var addrbuf []byte
var buf []byte
var rowid int64
- for baseRows.Next() {
- err = baseRows.Scan(&rowid, &addrbuf, &buf)
- if err != nil {
- return 0, pendingRow{}, err
- }
+ for {
+ if pendingBase.rowid != 0 {
+ addr = pendingBase.addr
+ rowid = pendingBase.rowid
+ accountData = *pendingBase.accountData
+ buf = pendingBase.encodedAccountData
+ pendingBase = pendingBaseRow{}
+ } else {
+ if !baseRows.Next() {
+ break
+ }
- if len(addrbuf) != len(addr) {
- err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
- return 0, pendingRow{}, err
- }
+ err = baseRows.Scan(&rowid, &addrbuf, &buf)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
- copy(addr[:], addrbuf)
+ if len(addrbuf) != len(addr) {
+ err = fmt.Errorf("account DB address length mismatch: %d != %d", len(addrbuf), len(addr))
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
- accountData = baseAccountData{}
- err = protocol.Decode(buf, &accountData)
- if err != nil {
- return 0, pendingRow{}, err
+ copy(addr[:], addrbuf)
+
+ accountData = baseAccountData{}
+ err = protocol.Decode(buf, &accountData)
+ if err != nil {
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
}
+
err = baseCb(addr, rowid, &accountData, buf)
if err != nil {
- return 0, pendingRow{}, err
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
}
- pending, err = processAllResources(resRows, addr, &accountData, rowid, pending, resCb)
+ var resourcesProcessed int
+ pendingResource, resourcesProcessed, err = processAllResources(resRows, addr, &accountData, rowid, pendingResource, resourceCount, resCb)
if err != nil {
err = fmt.Errorf("failed to gather resources for account %v, addrid %d, prev address %v : %w", addr, rowid, prevAddr, err)
- return 0, pendingRow{}, err
+ return 0, pendingBaseRow{}, pendingResourceRow{}, err
+ }
+
+ if resourcesProcessed == resourceCount {
+ // we're done with this iteration.
+ pendingBase := pendingBaseRow{
+ addr: addr,
+ rowid: rowid,
+ accountData: &accountData,
+ encodedAccountData: buf,
+ }
+ return count, pendingBase, pendingResource, nil
}
+ resourceCount -= resourcesProcessed
count++
if accountCount > 0 && count == accountCount {
// we're done with this iteration.
- return count, pending, nil
+ return count, pendingBaseRow{}, pendingResource, nil
}
prevAddr = addr
}
- return count, pending, nil
+ return count, pendingBaseRow{}, pendingResource, nil
}
// loadFullAccount converts baseAccountData into basics.AccountData and loads all resources as needed
@@ -4458,7 +4540,7 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
return nil
}
- resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte) error {
+ resCb := func(addr basics.Address, cidx basics.CreatableIndex, resData *resourcesData, encodedResourceData []byte, lastResource bool) error {
var err error
if resData != nil {
var ctype basics.CreatableType
@@ -4477,10 +4559,10 @@ func (iterator *orderedAccountsIter) Next(ctx context.Context) (acct []accountAd
}
count := 0
- count, iterator.pendingRow, err = processAllBaseAccountRecords(
+ count, iterator.pendingBaseRow, iterator.pendingResourceRow, err = processAllBaseAccountRecords(
iterator.accountBaseRows, iterator.resourcesRows,
baseCb, resCb,
- iterator.pendingRow, iterator.accountCount,
+ iterator.pendingBaseRow, iterator.pendingResourceRow, iterator.accountCount, iterator.resourceCount,
)
if err != nil {
iterator.Close(ctx)
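The thread running through this accountdb.go refactor is resumability: processAllBaseAccountRecords can now stop mid-account when the per-chunk resource budget runs out, stash the in-flight rows in pendingBaseRow/pendingResourceRow, and pick up exactly there on the next call. A toy cursor showing the budget-and-resume shape (types are illustrative, not the ledger's):

```go
package main

import "fmt"

// chunker hands out budget-limited chunks of work, remembering its
// position between calls the way the iterator remembers pending rows.
type chunker struct {
	rest []int
}

// next returns at most budget items; leftover items stay queued for
// the following call, so no item is dropped or duplicated.
func (c *chunker) next(budget int) []int {
	n := budget
	if n > len(c.rest) {
		n = len(c.rest)
	}
	out := c.rest[:n]
	c.rest = c.rest[n:]
	return out
}

func main() {
	c := chunker{rest: []int{1, 2, 3, 4, 5}}
	fmt.Println(c.next(2)) // [1 2]
	fmt.Println(c.next(2)) // [3 4]
	fmt.Println(c.next(2)) // [5]
}
```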
diff --git a/ledger/acctonline.go b/ledger/acctonline.go
index cb09d7c827..03af7908db 100644
--- a/ledger/acctonline.go
+++ b/ledger/acctonline.go
@@ -634,7 +634,6 @@ func (ao *onlineAccounts) lookupOnlineAccountData(rnd basics.Round, addr basics.
}
// the round number cannot be found in deltas, it is in history
inHistory = true
- err = nil
}
paramsOffset, err = ao.roundParamsOffset(rnd)
if err != nil {
@@ -764,7 +763,6 @@ func (ao *onlineAccounts) TopOnlineAccounts(rnd basics.Round, voteRnd basics.Rou
}
// the round number cannot be found in deltas, it is in history
inMemory = false
- err = nil
}
modifiedAccounts := make(map[basics.Address]*ledgercore.OnlineAccount)
diff --git a/ledger/acctupdates.go b/ledger/acctupdates.go
index 852df6df41..db2a991262 100644
--- a/ledger/acctupdates.go
+++ b/ledger/acctupdates.go
@@ -1041,14 +1041,17 @@ func (au *accountUpdates) lookupResource(rnd basics.Round, addr basics.Address,
// a separate transaction here, and directly use a prepared SQL query
// against the database.
persistedData, err = au.accountsq.lookupResources(addr, aidx, ctype)
+ if err != nil {
+ return ledgercore.AccountResource{}, basics.Round(0), err
+ }
if persistedData.round == currentDbRound {
if persistedData.addrid != 0 {
// if we read actual data return it
au.baseResources.writePending(persistedData, addr)
- return persistedData.AccountResource(), rnd, err
+ return persistedData.AccountResource(), rnd, nil
}
// otherwise return empty
- return ledgercore.AccountResource{}, rnd, err
+ return ledgercore.AccountResource{}, rnd, nil
}
if synchronized {
if persistedData.round < currentDbRound {
@@ -1140,19 +1143,22 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
// a separate transaction here, and directly use a prepared SQL query
// against the database.
persistedData, err = au.accountsq.lookup(addr)
+ if err != nil {
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, err
+ }
if persistedData.round == currentDbRound {
if persistedData.rowid != 0 {
// if we read actual data return it
au.baseAccounts.writePending(persistedData)
- return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, err
+ return persistedData.accountData.GetLedgerCoreAccountData(), rnd, rewardsVersion, rewardsLevel, nil
}
// otherwise return empty
- return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, err
+ return ledgercore.AccountData{}, rnd, rewardsVersion, rewardsLevel, nil
}
if synchronized {
if persistedData.round < currentDbRound {
au.log.Errorf("accountUpdates.lookupWithoutRewards: database round %d is behind in-memory round %d", persistedData.round, currentDbRound)
- return ledgercore.AccountData{}, basics.Round(0), rewardsVersion, rewardsLevel, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, &StaleDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
}
au.accountsMu.RLock()
needUnlock = true
@@ -1162,7 +1168,7 @@ func (au *accountUpdates) lookupWithoutRewards(rnd basics.Round, addr basics.Add
} else {
// in non-sync mode, we don't wait since we already assume that we're synchronized.
au.log.Errorf("accountUpdates.lookupWithoutRewards: database round %d mismatching in-memory round %d", persistedData.round, currentDbRound)
- return ledgercore.AccountData{}, basics.Round(0), rewardsVersion, rewardsLevel, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
+ return ledgercore.AccountData{}, basics.Round(0), "", 0, &MismatchingDatabaseRoundError{databaseRound: persistedData.round, memoryRound: currentDbRound}
}
}
}
@@ -1219,9 +1225,11 @@ func (au *accountUpdates) getCreatorForRound(rnd basics.Round, cidx basics.Creat
}
// Check the database
creator, ok, dbRound, err = au.accountsq.lookupCreator(cidx, ctype)
-
+ if err != nil {
+ return basics.Address{}, false, err
+ }
if dbRound == currentDbRound {
- return
+ return creator, ok, nil
}
if synchronized {
if dbRound < currentDbRound {
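The three hunks in acctupdates.go apply one fix in three places: check the query error before inspecting its results, and on failure return explicit zero values instead of whatever the named return parameters happen to hold. A condensed sketch of the corrected pattern (lookup is a stand-in, not the real accountsq API):

```go
package main

import "errors"

var errStale = errors.New("stale database round")

// lookup stands in for a prepared-statement query that can fail.
func lookup() (value int, round int, err error) { return 42, 7, nil }

func get(currentRound int) (int, error) {
	value, round, err := lookup()
	if err != nil {
		// Fail fast: never inspect value/round when the query itself failed.
		return 0, err
	}
	if round != currentRound {
		// Return explicit zero values, not leftovers from named returns.
		return 0, errStale
	}
	return value, nil
}
```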
diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go
index 740ac91d03..edd44549ed 100644
--- a/ledger/acctupdates_test.go
+++ b/ledger/acctupdates_test.go
@@ -22,7 +22,6 @@ import (
"database/sql"
"errors"
"fmt"
- "io/ioutil"
"os"
"runtime"
"strings"
@@ -121,12 +120,14 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
dblogger := logging.TestingLog(t)
dblogger.SetLevel(logging.Info)
newLedgerTracker := &mockLedgerForTracker{
- inMemory: false,
- log: dblogger,
- blocks: make([]blockEntry, len(ml.blocks)),
- deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
- accts: make(map[basics.Address]basics.AccountData),
- filename: fn,
+ inMemory: false,
+ log: dblogger,
+ blocks: make([]blockEntry, len(ml.blocks)),
+ deltas: make([]ledgercore.StateDelta, len(ml.deltas)),
+ accts: make(map[basics.Address]basics.AccountData),
+ filename: fn,
+ consensusParams: ml.consensusParams,
+ consensusVersion: ml.consensusVersion,
}
for k, v := range ml.accts {
newLedgerTracker.accts[k] = v
@@ -138,9 +139,9 @@ func (ml *mockLedgerForTracker) fork(t testing.TB) *mockLedgerForTracker {
ml.dbs.Wdb.Vacuum(context.Background())
// copy the database files.
for _, ext := range []string{"", "-shm", "-wal"} {
- bytes, err := ioutil.ReadFile(ml.filename + ext)
+ bytes, err := os.ReadFile(ml.filename + ext)
require.NoError(t, err)
- err = ioutil.WriteFile(newLedgerTracker.filename+ext, bytes, 0600)
+ err = os.WriteFile(newLedgerTracker.filename+ext, bytes, 0600)
require.NoError(t, err)
}
dbs, err := db.OpenPair(newLedgerTracker.filename, false)
@@ -290,7 +291,7 @@ func checkAcctUpdates(t *testing.T, au *accountUpdates, ao *onlineAccounts, base
require.Error(t, err)
require.Equal(t, basics.Round(0), validThrough)
- if base > 0 {
+ if base > 0 && base >= basics.Round(ao.maxBalLookback()) {
_, err := ao.onlineTotals(base - basics.Round(ao.maxBalLookback()))
require.Error(t, err)
diff --git a/ledger/archival_test.go b/ledger/archival_test.go
index cb6dc4ae51..6e61ee0abf 100644
--- a/ledger/archival_test.go
+++ b/ledger/archival_test.go
@@ -143,6 +143,7 @@ func TestArchival(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -720,6 +721,7 @@ func TestArchivalFromNonArchival(t *testing.T) {
cfg.Archival = false
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbPrefix, inMem, genesisInitState, cfg)
require.NoError(t, err)
blk := genesisInitState.Block
diff --git a/ledger/catchpointtracker.go b/ledger/catchpointtracker.go
index 0cfc9089da..5cfe0f3c4f 100644
--- a/ledger/catchpointtracker.go
+++ b/ledger/catchpointtracker.go
@@ -1057,7 +1057,7 @@ func (ct *catchpointTracker) generateCatchpointData(ctx context.Context, account
start := time.Now()
ledgerGeneratecatchpointCount.Inc(nil)
err := ct.dbs.Rdb.Atomic(func(dbCtx context.Context, tx *sql.Tx) (err error) {
- catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx)
+ catchpointWriter, err = makeCatchpointWriter(ctx, catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
if err != nil {
return
}
@@ -1459,7 +1459,7 @@ func (ct *catchpointTracker) accountsInitializeHashes(ctx context.Context, tx *s
if rootHash.IsZero() {
ct.log.Infof("accountsInitialize rebuilding merkle trie for round %d", rnd)
- accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize)
+ accountBuilderIt := makeOrderedAccountsIter(tx, trieRebuildAccountChunkSize, DefaultMaxResourcesPerChunk)
defer accountBuilderIt.Close(ctx)
startTrieBuildTime := time.Now()
trieHashCount := 0
diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go
index 2d7bebcd5e..a12e6fa9ad 100644
--- a/ledger/catchpointtracker_test.go
+++ b/ledger/catchpointtracker_test.go
@@ -21,7 +21,6 @@ import (
"database/sql"
"errors"
"fmt"
- "io/ioutil"
"os"
"path"
"path/filepath"
@@ -102,7 +101,7 @@ func TestGetCatchpointStream(t *testing.T) {
for i := 0; i < filesToCreate; i++ {
fileName := filepath.Join(CatchpointDirName, fmt.Sprintf("%d.catchpoint", i))
data := []byte{byte(i), byte(i + 1), byte(i + 2)}
- err = ioutil.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
+ err = os.WriteFile(filepath.Join(temporaryDirectory, fileName), data, 0666)
require.NoError(t, err)
// Store the catchpoint into the database
diff --git a/ledger/catchpointwriter.go b/ledger/catchpointwriter.go
index 7b7f07d2f7..6f1e11dfe5 100644
--- a/ledger/catchpointwriter.go
+++ b/ledger/catchpointwriter.go
@@ -35,6 +35,10 @@ const (
// BalancesPerCatchpointFileChunk defines the number of accounts that would be stored in each chunk in the catchpoint file.
// note that the last chunk would typically be less than this number.
BalancesPerCatchpointFileChunk = 512
+
+ // DefaultMaxResourcesPerChunk defines the maximum number of resources that go into a single chunk:
+ // 300000 resources * ~300B/resource => roughly 90MB per chunk, keeping each chunk under ~100MB
+ DefaultMaxResourcesPerChunk = 300000
)
// catchpointWriter is the struct managing the persistence of accounts data into the catchpoint file.
@@ -42,19 +46,21 @@ const (
// the writing is complete. It might take multiple steps until the operation is over, and the caller
// has the option of throttling the CPU utilization in between the calls.
type catchpointWriter struct {
- ctx context.Context
- tx *sql.Tx
- filePath string
- totalAccounts uint64
- totalChunks uint64
- file *os.File
- tar *tar.Writer
- compressor io.WriteCloser
- balancesChunk catchpointFileBalancesChunkV6
- balancesChunkNum uint64
- writtenBytes int64
- biggestChunkLen uint64
- accountsIterator encodedAccountsBatchIter
+ ctx context.Context
+ tx *sql.Tx
+ filePath string
+ totalAccounts uint64
+ totalChunks uint64
+ file *os.File
+ tar *tar.Writer
+ compressor io.WriteCloser
+ balancesChunk catchpointFileBalancesChunkV6
+ balancesChunkNum uint64
+ numAccountsProcessed uint64
+ writtenBytes int64
+ biggestChunkLen uint64
+ accountsIterator encodedAccountsBatchIter
+ maxResourcesPerChunk int
}
type encodedBalanceRecordV5 struct {
@@ -79,14 +85,18 @@ type encodedBalanceRecordV6 struct {
Address basics.Address `codec:"a,allocbound=crypto.DigestSize"`
AccountData msgp.Raw `codec:"b,allocbound=basics.MaxEncodedAccountDataSize"`
Resources map[uint64]msgp.Raw `codec:"c,allocbound=basics.MaxEncodedAccountDataSize"`
+
+ // flag indicating whether there are more records for the same account coming up
+ ExpectingMoreEntries bool `codec:"e"`
}
type catchpointFileBalancesChunkV6 struct {
- _struct struct{} `codec:",omitempty,omitemptyarray"`
- Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
+ _struct struct{} `codec:",omitempty,omitemptyarray"`
+ Balances []encodedBalanceRecordV6 `codec:"bl,allocbound=BalancesPerCatchpointFileChunk"`
+ numAccounts uint64
}
-func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx) (*catchpointWriter, error) {
+func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx, maxResourcesPerChunk int) (*catchpointWriter, error) {
totalAccounts, err := totalAccounts(ctx, tx)
if err != nil {
return nil, err
@@ -107,14 +117,15 @@ func makeCatchpointWriter(ctx context.Context, filePath string, tx *sql.Tx) (*ca
tar := tar.NewWriter(compressor)
res := &catchpointWriter{
- ctx: ctx,
- tx: tx,
- filePath: filePath,
- totalAccounts: totalAccounts,
- totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
- file: file,
- compressor: compressor,
- tar: tar,
+ ctx: ctx,
+ tx: tx,
+ filePath: filePath,
+ totalAccounts: totalAccounts,
+ totalChunks: (totalAccounts + BalancesPerCatchpointFileChunk - 1) / BalancesPerCatchpointFileChunk,
+ file: file,
+ compressor: compressor,
+ tar: tar,
+ maxResourcesPerChunk: maxResourcesPerChunk,
}
return res, nil
}
@@ -135,7 +146,7 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
writerRequest := make(chan catchpointFileBalancesChunkV6, 1)
writerResponse := make(chan error, 2)
- go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum)
+ go cw.asyncWriter(writerRequest, writerResponse, cw.balancesChunkNum, cw.numAccountsProcessed)
defer func() {
close(writerRequest)
// wait for the writerResponse to close.
@@ -180,9 +191,10 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
// write to disk.
if len(cw.balancesChunk.Balances) > 0 {
+ cw.numAccountsProcessed += cw.balancesChunk.numAccounts
cw.balancesChunkNum++
writerRequest <- cw.balancesChunk
- if len(cw.balancesChunk.Balances) < BalancesPerCatchpointFileChunk || cw.balancesChunkNum == cw.totalChunks {
+ if cw.numAccountsProcessed == cw.totalAccounts {
cw.accountsIterator.Close()
// if we're done, wait for the writer to complete its writing.
err, opened := <-writerResponse
@@ -199,11 +211,13 @@ func (cw *catchpointWriter) WriteStep(stepCtx context.Context) (more bool, err e
}
}
-func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64) {
+func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChunkV6, response chan error, initialBalancesChunkNum uint64, initialNumAccounts uint64) {
defer close(response)
balancesChunkNum := initialBalancesChunkNum
+ numAccountsProcessed := initialNumAccounts
for bc := range balances {
balancesChunkNum++
+ numAccountsProcessed += bc.numAccounts
if len(bc.Balances) == 0 {
break
}
@@ -226,8 +240,7 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
if chunkLen := uint64(len(encodedChunk)); cw.biggestChunkLen < chunkLen {
cw.biggestChunkLen = chunkLen
}
-
- if len(bc.Balances) < BalancesPerCatchpointFileChunk || balancesChunkNum == cw.totalChunks {
+ if numAccountsProcessed == cw.totalAccounts {
cw.tar.Close()
cw.compressor.Close()
cw.file.Close()
@@ -244,7 +257,7 @@ func (cw *catchpointWriter) asyncWriter(balances chan catchpointFileBalancesChun
}
func (cw *catchpointWriter) readDatabaseStep(ctx context.Context, tx *sql.Tx) (err error) {
- cw.balancesChunk.Balances, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk)
+ cw.balancesChunk.Balances, cw.balancesChunk.numAccounts, err = cw.accountsIterator.Next(ctx, tx, BalancesPerCatchpointFileChunk, cw.maxResourcesPerChunk)
return
}
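For the chunk-size constant above, the arithmetic in its comment works out as follows: at an assumed ~300 bytes per encoded resource, 300,000 resources bound a chunk at about 90 MB, comfortably under the ~100 MB target. A one-liner to sanity-check it:

```go
package main

import "fmt"

func main() {
	const (
		maxResourcesPerChunk = 300_000
		approxBytesPerRes    = 300 // rough encoded size assumed in the comment
	)
	fmt.Printf("~%d MB per chunk\n",
		maxResourcesPerChunk*approxBytesPerRes/1_000_000) // ~90 MB per chunk
}
```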
diff --git a/ledger/catchpointwriter_test.go b/ledger/catchpointwriter_test.go
index fa1819d972..c4fa8cf847 100644
--- a/ledger/catchpointwriter_test.go
+++ b/ledger/catchpointwriter_test.go
@@ -24,7 +24,7 @@ import (
"database/sql"
"fmt"
"io"
- "io/ioutil"
+ "os"
"path/filepath"
"runtime"
"testing"
@@ -199,7 +199,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
protoParams.CatchpointLookback = 32
config.Consensus[testProtocolVersion] = protoParams
- temporaryDirectroy := t.TempDir()
+ temporaryDirectory := t.TempDir()
defer func() {
delete(config.Consensus, testProtocolVersion)
}()
@@ -215,11 +215,11 @@ func TestBasicCatchpointWriter(t *testing.T) {
err := au.loadFromDisk(ml, 0)
require.NoError(t, err)
au.close()
- fileName := filepath.Join(temporaryDirectroy, "15.data")
+ fileName := filepath.Join(temporaryDirectory, "15.data")
readDb := ml.trackerDB().Rdb
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), fileName, tx)
+ writer, err := makeCatchpointWriter(context.Background(), fileName, tx, DefaultMaxResourcesPerChunk)
if err != nil {
return err
}
@@ -235,7 +235,7 @@ func TestBasicCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// load the file from disk.
- fileContent, err := ioutil.ReadFile(fileName)
+ fileContent, err := os.ReadFile(fileName)
require.NoError(t, err)
compressorReader, err := catchpointStage1Decoder(bytes.NewBuffer(fileContent))
require.NoError(t, err)
@@ -306,7 +306,312 @@ func TestFullCatchpointWriter(t *testing.T) {
var accountsRnd basics.Round
var totals ledgercore.AccountTotals
err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
- writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx)
+ writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, DefaultMaxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ for {
+ more, err := writer.WriteStep(context.Background())
+ require.NoError(t, err)
+ if !more {
+ break
+ }
+ }
+ totalAccounts = writer.GetTotalAccounts()
+ totalChunks = writer.GetTotalChunks()
+ biggestChunkLen = writer.GetBiggestChunkLen()
+ accountsRnd, err = accountsRound(tx)
+ if err != nil {
+ return
+ }
+ totals, err = accountsTotals(ctx, tx, false)
+ return
+ })
+ require.NoError(t, err)
+ blocksRound := accountsRnd + 1
+ blockHeaderDigest := crypto.Hash([]byte{1, 2, 3})
+ catchpointLabel := fmt.Sprintf("%d#%v", blocksRound, blockHeaderDigest) // this is not a correct way to create a label, but it's good enough for this unit test
+ catchpointFileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: accountsRnd,
+ BlocksRound: blocksRound,
+ Totals: totals,
+ TotalAccounts: totalAccounts,
+ TotalChunks: totalChunks,
+ Catchpoint: catchpointLabel,
+ BlockHeaderDigest: blockHeaderDigest,
+ }
+ err = repackCatchpoint(
+ context.Background(), catchpointFileHeader, biggestChunkLen,
+ catchpointDataFilePath, catchpointFilePath)
+ require.NoError(t, err)
+
+ // create a ledger.
+ var initState ledgercore.InitState
+ initState.Block.CurrentProtocol = protocol.ConsensusCurrentVersion
+ l, err := OpenLedger(ml.log, "TestFullCatchpointWriter", true, initState, conf)
+ require.NoError(t, err)
+ defer l.Close()
+ accessor := MakeCatchpointCatchupAccessor(l, l.log)
+
+ err = accessor.ResetStagingBalances(context.Background(), true)
+ require.NoError(t, err)
+
+ // load the file from disk.
+ fileContent, err := os.ReadFile(catchpointFilePath)
+ require.NoError(t, err)
+ gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
+ require.NoError(t, err)
+ tarReader := tar.NewReader(gzipReader)
+ var catchupProgress CatchpointCatchupAccessorProgress
+ defer gzipReader.Close()
+ for {
+ header, err := tarReader.Next()
+ if err != nil {
+ if err == io.EOF {
+ break
+ }
+ require.NoError(t, err)
+ break
+ }
+ balancesBlockBytes := make([]byte, header.Size)
+ readComplete := int64(0)
+
+ for readComplete < header.Size {
+ bytesRead, err := tarReader.Read(balancesBlockBytes[readComplete:])
+ readComplete += int64(bytesRead)
+ if err != nil {
+ if err == io.EOF {
+ if readComplete == header.Size {
+ break
+ }
+ require.NoError(t, err)
+ }
+ break
+ }
+ }
+ err = accessor.ProgressStagingBalances(context.Background(), header.Name, balancesBlockBytes, &catchupProgress)
+ require.NoError(t, err)
+ }
+
+ err = l.trackerDBs.Wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ err := applyCatchpointStagingBalances(ctx, tx, 0, 0)
+ return err
+ })
+ require.NoError(t, err)
+
+ // verify that the account data aligns with what we originally stored :
+ for addr, acct := range accts {
+ acctData, validThrough, _, err := l.LookupLatest(addr)
+ require.NoErrorf(t, err, "failed to lookup for account %v after restoring from catchpoint", addr)
+ require.Equal(t, acct, acctData)
+ require.Equal(t, basics.Round(0), validThrough)
+ }
+}
+
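The restore half of the test above is the standard stdlib recipe for walking a gzip-compressed tar: wrap the stream in a gzip.Reader, iterate tar entries until io.EOF, and read each entry fully before moving on. The same recipe in isolation, with io.ReadAll doing the short-read accounting the test does by hand:

```go
package archiveutil

import (
	"archive/tar"
	"bytes"
	"compress/gzip"
	"io"
)

// readTarGz returns the full contents of every entry in a .tar.gz blob.
func readTarGz(blob []byte) (map[string][]byte, error) {
	gz, err := gzip.NewReader(bytes.NewReader(blob))
	if err != nil {
		return nil, err
	}
	defer gz.Close()

	out := make(map[string][]byte)
	tr := tar.NewReader(gz)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return out, nil // end of archive
		}
		if err != nil {
			return nil, err
		}
		data, err := io.ReadAll(tr) // handles short reads across tar blocks
		if err != nil {
			return nil, err
		}
		out[hdr.Name] = data
	}
}
```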
+func TestCatchpointReadDatabaseOverflowSingleAccount(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create new protocol version, which has lower lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ maxResourcesPerChunk := 5
+
+ accts := ledgertesting.RandomAccounts(1, false)
+ // force acct to have overflowing number of resources
+ for addr, acct := range accts {
+ if acct.AssetParams == nil {
+ acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0)
+ accts[addr] = acct
+ }
+ for i := uint64(0); i < 20; i++ {
+ ap := ledgertesting.RandomAssetParams()
+ acct.AssetParams[basics.AssetIndex(i+100)] = ap
+ }
+ }
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ readDb := ml.trackerDB().Rdb
+
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ expectedTotalAccounts := uint64(1)
+ totalAccountsWritten := uint64(0)
+ totalResources := 0
+ totalChunks := 0
+ var expectedTotalResources int
+ cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources)
+ if err != nil {
+ return err
+ }
+ // repeat until all accounts have been read
+ for totalAccountsWritten < expectedTotalAccounts {
+ cw.balancesChunk.Balances = nil
+ err := cw.readDatabaseStep(cw.ctx, cw.tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten += cw.balancesChunk.numAccounts
+ numResources := 0
+ for _, balance := range cw.balancesChunk.Balances {
+ numResources += len(balance.Resources)
+ }
+ if numResources > maxResourcesPerChunk {
+ return fmt.Errorf("too many resources in this chunk: found %d resources, maximum %d resources", numResources, maxResourcesPerChunk)
+ }
+ totalResources += numResources
+ totalChunks++
+ }
+
+ if totalChunks <= 1 {
+ return fmt.Errorf("expected more than one chunk due to overflow")
+ }
+
+ if expectedTotalResources != totalResources {
+ return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources)
+ }
+
+ return
+ })
+
+ require.NoError(t, err)
+}
+
+func TestCatchpointReadDatabaseOverflowAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create a new protocol version with a lower catchpoint lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ maxResourcesPerChunk := 5
+
+ accts := ledgertesting.RandomAccounts(5, false)
+ // force each account to have an overflowing number of resources
+ assetIndex := 1000
+ for addr, acct := range accts {
+ if acct.AssetParams == nil {
+ acct.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, 0)
+ accts[addr] = acct
+ }
+ for i := uint64(0); i < 20; i++ {
+ ap := ledgertesting.RandomAssetParams()
+ acct.AssetParams[basics.AssetIndex(assetIndex)] = ap
+ assetIndex++
+ }
+ }
+
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ readDb := ml.trackerDB().Rdb
+
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ expectedTotalAccounts, err := totalAccounts(ctx, tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten := uint64(0)
+ totalResources := 0
+ var expectedTotalResources int
+ cw, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, maxResourcesPerChunk)
+ if err != nil {
+ return err
+ }
+ err = cw.tx.QueryRowContext(cw.ctx, "SELECT count(1) FROM resources").Scan(&expectedTotalResources)
+ if err != nil {
+ return err
+ }
+ // page through the database until all accounts have been read
+ for totalAccountsWritten < expectedTotalAccounts {
+ cw.balancesChunk.Balances = nil
+ err := cw.readDatabaseStep(cw.ctx, cw.tx)
+ if err != nil {
+ return err
+ }
+ totalAccountsWritten += cw.balancesChunk.numAccounts
+ numResources := 0
+ for _, balance := range cw.balancesChunk.Balances {
+ numResources += len(balance.Resources)
+ }
+ if numResources > maxResourcesPerChunk {
+ return fmt.Errorf("too many resources in this chunk: found %d resources, maximum %d resources", numResources, maxResourcesPerChunk)
+ }
+ totalResources += numResources
+ }
+
+ if expectedTotalResources != totalResources {
+ return fmt.Errorf("total resources did not match: expected %d, actual %d", expectedTotalResources, totalResources)
+ }
+
+ return
+ })
+
+ require.NoError(t, err)
+}
+
+func TestFullCatchpointWriterOverflowAccounts(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // create a new protocol version with a lower catchpoint lookback
+ testProtocolVersion := protocol.ConsensusVersion("test-protocol-TestFullCatchpointWriter")
+ protoParams := config.Consensus[protocol.ConsensusCurrentVersion]
+ protoParams.CatchpointLookback = 32
+ config.Consensus[testProtocolVersion] = protoParams
+ temporaryDirectory := t.TempDir()
+ defer func() {
+ delete(config.Consensus, testProtocolVersion)
+ }()
+
+ accts := ledgertesting.RandomAccounts(BalancesPerCatchpointFileChunk*3, false)
+ ml := makeMockLedgerForTracker(t, true, 10, testProtocolVersion, []map[basics.Address]basics.AccountData{accts})
+ defer ml.Close()
+
+ conf := config.GetDefaultLocal()
+ conf.CatchpointInterval = 1
+ conf.Archival = true
+ au, _ := newAcctUpdates(t, ml, conf)
+ err := au.loadFromDisk(ml, 0)
+ require.NoError(t, err)
+ au.close()
+ catchpointDataFilePath := filepath.Join(temporaryDirectory, "15.data")
+ catchpointFilePath := filepath.Join(temporaryDirectory, "15.catchpoint")
+ readDb := ml.trackerDB().Rdb
+ var totalAccounts uint64
+ var totalChunks uint64
+ var biggestChunkLen uint64
+ var accountsRnd basics.Round
+ var totals ledgercore.AccountTotals
+ err = readDb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
+ writer, err := makeCatchpointWriter(context.Background(), catchpointDataFilePath, tx, 5)
if err != nil {
return err
}
@@ -358,7 +663,7 @@ func TestFullCatchpointWriter(t *testing.T) {
require.NoError(t, err)
// load the file from disk.
- fileContent, err := ioutil.ReadFile(catchpointFilePath)
+ fileContent, err := os.ReadFile(catchpointFilePath)
require.NoError(t, err)
gzipReader, err := gzip.NewReader(bytes.NewBuffer(fileContent))
require.NoError(t, err)
diff --git a/ledger/catchupaccessor.go b/ledger/catchupaccessor.go
index ec05c86af8..3c2f6aceee 100644
--- a/ledger/catchupaccessor.go
+++ b/ledger/catchupaccessor.go
@@ -86,6 +86,9 @@ type CatchpointCatchupAccessor interface {
// CompleteCatchup completes the catchpoint catchup process by switching the databases tables around
// and reloading the ledger.
CompleteCatchup(ctx context.Context) (err error)
+
+ // Ledger returns a narrow subset of Ledger methods needed by CatchpointCatchupAccessor clients
+ Ledger() (l CatchupAccessorClientLedger)
}
// CatchpointCatchupAccessorImpl is the concrete implementation of the CatchpointCatchupAccessor interface
@@ -94,6 +97,13 @@ type CatchpointCatchupAccessorImpl struct {
// log copied from ledger
log logging.Logger
+
+ acctResCnt catchpointAccountResourceCounter
+
+ // expectingSpecificAccount is set when the next balance record must continue
+ // the account that was split across the previous chunk
+ expectingSpecificAccount bool
+ // nextExpectedAccount is the account expected next; it is the empty address
+ // when no specific account is expected
+ nextExpectedAccount basics.Address
}
// CatchpointCatchupState is the state of the current catchpoint catchup process
@@ -104,8 +114,8 @@ const (
CatchpointCatchupStateInactive = iota
// CatchpointCatchupStateLedgerDownload indicates that we're downloading the ledger
CatchpointCatchupStateLedgerDownload
- // CatchpointCatchupStateLastestBlockDownload indicates that we're download the latest block
- CatchpointCatchupStateLastestBlockDownload
+ // CatchpointCatchupStateLatestBlockDownload indicates that we're downloading the latest block
+ CatchpointCatchupStateLatestBlockDownload
// CatchpointCatchupStateBlocksDownload indicates that we're downloading the blocks prior to the latest one ( total of CatchpointLookback blocks )
CatchpointCatchupStateBlocksDownload
// CatchpointCatchupStateSwitch indicates that we're switching to use the downloaded ledger/blocks content
@@ -115,6 +125,14 @@ const (
catchpointCatchupStateLast = CatchpointCatchupStateSwitch
)
+// CatchupAccessorClientLedger represents the narrow ledger interface needed by catchpoint accessor clients
+type CatchupAccessorClientLedger interface {
+ Block(rnd basics.Round) (blk bookkeeping.Block, err error)
+ GenesisHash() crypto.Digest
+ BlockHdr(rnd basics.Round) (blk bookkeeping.BlockHeader, err error)
+ Latest() (rnd basics.Round)
+}
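+
+// Callers that only need read access to blocks and rounds can depend on
+// CatchupAccessorClientLedger rather than a full *Ledger; for example, a
+// hypothetical helper like latestBlockHeader(l CatchupAccessorClientLedger)
+// could be unit tested against a lightweight fake of this interface.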
+
// MakeCatchpointCatchupAccessor creates a CatchpointCatchupAccessor given a ledger
func MakeCatchpointCatchupAccessor(ledger *Ledger, log logging.Logger) CatchpointCatchupAccessor {
return &CatchpointCatchupAccessorImpl{
@@ -299,6 +317,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
ledgerProcessstagingbalancesCount.Inc(nil)
var normalizedAccountBalances []normalizedAccountBalance
+ var expectingMoreEntries []bool
switch progress.Version {
default:
@@ -317,6 +336,7 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
normalizedAccountBalances, err = prepareNormalizedBalancesV5(balances.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(balances.Balances))
case CatchpointFileVersionV6:
var balances catchpointFileBalancesChunkV6
@@ -330,12 +350,83 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
}
normalizedAccountBalances, err = prepareNormalizedBalancesV6(balances.Balances, c.ledger.GenesisProto())
+ expectingMoreEntries = make([]bool, len(balances.Balances))
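+ // a V6 chunk may end in the middle of an account's resources; remember
+ // which balance records expect more entries in the next chunk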
+ for i, balance := range balances.Balances {
+ expectingMoreEntries[i] = balance.ExpectingMoreEntries
+ }
}
if err != nil {
return fmt.Errorf("processStagingBalances failed to prepare normalized balances : %w", err)
}
+ expectingSpecificAccount := c.expectingSpecificAccount
+ nextExpectedAccount := c.nextExpectedAccount
+
+ // keep track of the number of resources processed for each account
+ for i, balance := range normalizedAccountBalances {
+ // the previous chunk ended mid-account; this chunk must continue with that same account
+ if expectingSpecificAccount && balance.address != nextExpectedAccount {
+ return fmt.Errorf("processStagingBalances received incomplete chunks for account %v", nextExpectedAccount)
+ }
+
+ for _, resData := range balance.resources {
+ if resData.IsApp() && resData.IsOwning() {
+ c.acctResCnt.totalAppParams++
+ }
+ if resData.IsApp() && resData.IsHolding() {
+ c.acctResCnt.totalAppLocalStates++
+ }
+ if resData.IsAsset() && resData.IsOwning() {
+ c.acctResCnt.totalAssetParams++
+ }
+ if resData.IsAsset() && resData.IsHolding() {
+ c.acctResCnt.totalAssets++
+ }
+ }
+ // check that the counted resources add up for this account
+ if !expectingMoreEntries[i] {
+ if c.acctResCnt.totalAppParams != balance.accountData.TotalAppParams {
+ return fmt.Errorf(
+ "processStagingBalances received %d appParams for account %v, expected %d",
+ c.acctResCnt.totalAppParams,
+ balance.address,
+ balance.accountData.TotalAppParams,
+ )
+ }
+ if c.acctResCnt.totalAppLocalStates != balance.accountData.TotalAppLocalStates {
+ return fmt.Errorf(
+ "processStagingBalances received %d appLocalStates for account %v, expected %d",
+ c.acctResCnt.totalAppLocalStates,
+ balance.address,
+ balance.accountData.TotalAppLocalStates,
+ )
+ }
+ if c.acctResCnt.totalAssetParams != balance.accountData.TotalAssetParams {
+ return fmt.Errorf(
+ "processStagingBalances received %d assetParams for account %v, expected %d",
+ c.acctResCnt.totalAssetParams,
+ balance.address,
+ balance.accountData.TotalAssetParams,
+ )
+ }
+ if c.acctResCnt.totalAssets != balance.accountData.TotalAssets {
+ return fmt.Errorf(
+ "processStagingBalances received %d assets for account %v, expected %d",
+ c.acctResCnt.totalAssets,
+ balance.address,
+ balance.accountData.TotalAssets,
+ )
+ }
+ c.acctResCnt = catchpointAccountResourceCounter{}
+ nextExpectedAccount = basics.Address{}
+ expectingSpecificAccount = false
+ } else {
+ nextExpectedAccount = balance.address
+ expectingSpecificAccount = true
+ }
+ }
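+
+ // c.acctResCnt accumulates directly across chunks, while the
+ // expected-account state is tracked in locals and written back to the
+ // accessor at the end of this function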
+
wg := sync.WaitGroup{}
var errBalances error
@@ -349,9 +440,9 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
wg.Add(1)
go func() {
defer wg.Done()
- errBalances = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) error {
+ errBalances = wdb.Atomic(func(ctx context.Context, tx *sql.Tx) (err error) {
start := time.Now()
- err := writeCatchpointStagingBalances(ctx, tx, normalizedAccountBalances)
+ err = writeCatchpointStagingBalances(ctx, tx, normalizedAccountBalances)
durBalances = time.Since(start)
return err
})
@@ -431,6 +522,9 @@ func (c *CatchpointCatchupAccessorImpl) processStagingBalances(ctx context.Conte
// restore "normal" synchronous mode
c.ledger.setSynchronousMode(ctx, c.ledger.synchronousMode)
}
+
+ c.expectingSpecificAccount = expectingSpecificAccount
+ c.nextExpectedAccount = nextExpectedAccount
return err
}
@@ -891,6 +985,11 @@ func (c *CatchpointCatchupAccessorImpl) finishBalances(ctx context.Context) (err
return err
}
+// Ledger returns the ledger instance as a CatchupAccessorClientLedger interface
+func (c *CatchpointCatchupAccessorImpl) Ledger() (l CatchupAccessorClientLedger) {
+ return c.ledger
+}
+
var ledgerResetstagingbalancesCount = metrics.NewCounter("ledger_catchup_resetstagingbalances_count", "calls")
var ledgerResetstagingbalancesMicros = metrics.NewCounter("ledger_catchup_resetstagingbalances_micros", "µs spent")
var ledgerProcessstagingcontentCount = metrics.NewCounter("ledger_catchup_processstagingcontent_count", "calls")
diff --git a/ledger/catchupaccessor_test.go b/ledger/catchupaccessor_test.go
index 88738ee669..50a8d9b571 100644
--- a/ledger/catchupaccessor_test.go
+++ b/ledger/catchupaccessor_test.go
@@ -59,7 +59,10 @@ func createTestingEncodedChunks(accountsCount uint64) (encodedAccountChunks [][]
accountData := baseAccountData{}
accountData.MicroAlgos.Raw = crypto.RandUint63()
randomAccount.AccountData = protocol.Encode(&accountData)
- crypto.RandBytes(randomAccount.Address[:])
+ // have the first account be the zero address
+ if i > 0 {
+ crypto.RandBytes(randomAccount.Address[:])
+ }
binary.LittleEndian.PutUint64(randomAccount.Address[:], accounts+i)
balances.Balances[i] = randomAccount
}
@@ -163,7 +166,7 @@ func TestCatchupAccessorFoo(t *testing.T) {
require.NoError(t, err, "catchpointAccessor.SetState")
err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLedgerDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
- err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLastestBlockDownload)
+ err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateLatestBlockDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
err = catchpointAccessor.SetState(context.Background(), CatchpointCatchupStateBlocksDownload)
require.NoError(t, err, "catchpointAccessor.SetState")
@@ -369,3 +372,53 @@ func TestVerifyCatchpoint(t *testing.T) {
require.Error(t, err)
//require.NoError(t, err)
}
+
+func TestCatchupAccessorResourceCountMismatch(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ // setup boilerplate
+ log := logging.TestingLog(t)
+ dbBaseFileName := t.Name()
+ const inMem = true
+ genesisInitState, _ := ledgertesting.GenerateInitState(t, protocol.ConsensusCurrentVersion, 100)
+ cfg := config.GetDefaultLocal()
+ l, err := OpenLedger(log, dbBaseFileName, inMem, genesisInitState, cfg)
+ require.NoError(t, err, "could not open ledger")
+ defer func() {
+ l.Close()
+ }()
+ catchpointAccessor := MakeCatchpointCatchupAccessor(l, log)
+ var progress CatchpointCatchupAccessorProgress
+ ctx := context.Background()
+
+ // build the catchpoint's content.msgpack file header from this:
+ fileHeader := CatchpointFileHeader{
+ Version: CatchpointFileVersionV6,
+ BalancesRound: basics.Round(0),
+ BlocksRound: basics.Round(0),
+ Totals: ledgercore.AccountTotals{},
+ TotalAccounts: 1,
+ TotalChunks: 1,
+ Catchpoint: "",
+ BlockHeaderDigest: crypto.Digest{},
+ }
+ encodedFileHeader := protocol.Encode(&fileHeader)
+ err = catchpointAccessor.ProgressStagingBalances(ctx, "content.msgpack", encodedFileHeader, &progress)
+ require.NoError(t, err)
+
+ var balances catchpointFileBalancesChunkV6
+ balances.Balances = make([]encodedBalanceRecordV6, 1)
+ var randomAccount encodedBalanceRecordV6
+ accountData := baseAccountData{}
+ accountData.MicroAlgos.Raw = crypto.RandUint63()
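+ // declare one app param in the account totals without supplying the
+ // matching resource record, so the accessor's resource count check must fail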
+ accountData.TotalAppParams = 1
+ randomAccount.AccountData = protocol.Encode(&accountData)
+ crypto.RandBytes(randomAccount.Address[:])
+ binary.LittleEndian.PutUint64(randomAccount.Address[:], 0)
+ balances.Balances[0] = randomAccount
+ encodedAccounts := protocol.Encode(&balances)
+
+ // expect error since there is a resource count mismatch
+ err = catchpointAccessor.ProgressStagingBalances(context.Background(), "balances.XX.msgpack", encodedAccounts, &progress)
+ require.Error(t, err)
+}
diff --git a/ledger/evalbench_test.go b/ledger/evalbench_test.go
index 265579cf39..fcbfef6817 100644
--- a/ledger/evalbench_test.go
+++ b/ledger/evalbench_test.go
@@ -509,7 +509,7 @@ func benchmarkPreparePaymentTransactionsTesting(b *testing.B, numTxns int, txnSo
require.NoError(b, err)
genHash := l.GenesisHash()
- // apply initialization transations if any
+ // apply initialization transactions if any
initSignedTxns, maxTxnPerBlock := txnSource.Prepare(b, addrs, keys, newBlock.Round(), genHash)
if len(initSignedTxns) > 0 {
diff --git a/ledger/fullblock_perf_test.go b/ledger/fullblock_perf_test.go
new file mode 100644
index 0000000000..d2d50cd8f7
--- /dev/null
+++ b/ledger/fullblock_perf_test.go
@@ -0,0 +1,638 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+
+package ledger
+
+import (
+ "context"
+ "crypto/rand"
+ "encoding/binary"
+ "fmt"
+ mrand "math/rand"
+ "path/filepath"
+ "testing"
+ "time"
+
+ "github.com/stretchr/testify/require"
+
+ "github.com/algorand/go-algorand/agreement"
+ "github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/algorand/go-algorand/data/bookkeeping"
+ "github.com/algorand/go-algorand/data/transactions"
+ "github.com/algorand/go-algorand/data/transactions/logic"
+ "github.com/algorand/go-algorand/data/transactions/verify"
+ "github.com/algorand/go-algorand/ledger/internal"
+ "github.com/algorand/go-algorand/ledger/ledgercore"
+ "github.com/algorand/go-algorand/logging"
+ "github.com/algorand/go-algorand/protocol"
+)
+
+type benchConfig struct {
+ txnCount uint64
+ round uint64
+ b *testing.B
+ creator basics.Address
+ accts []basics.Address
+ acctToAst map[basics.Address]map[basics.AssetIndex]uint64
+ acctToApp map[basics.Address]map[basics.AppIndex]struct{}
+ l0 *Ledger
+ l1 *Ledger
+ eval *internal.BlockEvaluator
+ numPay uint64
+ numAst uint64
+ numApp uint64
+ blocks []bookkeeping.Block
+}
+
+func setupEnv(b *testing.B, numAccts int) (bc *benchConfig) {
+ dbTempDir := b.TempDir()
+ name := b.Name()
+ dbName := fmt.Sprintf("%s.%d", name, crypto.RandUint64())
+ dbPrefix := filepath.Join(dbTempDir, dbName)
+
+ genesisInitState := getInitState()
+
+ // Use future protocol
+ genesisInitState.Block.CurrentProtocol = protocol.ConsensusFuture
+ genesisInitState.GenesisHash = crypto.Digest{1}
+ genesisInitState.Block.BlockHeader.GenesisHash = crypto.Digest{1}
+
+ // maintain a map from accounts to a map of assets and apps
+ acctToAst := make(map[basics.Address]map[basics.AssetIndex]uint64)
+ acctToApp := make(map[basics.Address]map[basics.AppIndex]struct{})
+ accts := make([]basics.Address, 0, numAccts)
+ // creator is the special rich account
+ creator := basics.Address{}
+ _, err := rand.Read(creator[:])
+ require.NoError(b, err)
+ genesisInitState.Accounts[creator] = basics.MakeAccountData(basics.Offline, basics.MicroAlgos{Raw: 1234567890000000000})
+
+ logger := logging.TestingLog(b)
+ logger.SetLevel(logging.Warn)
+
+ // open 2 ledgers: 1st for preparing the blocks, 2nd for measuring the time
+ inMem := false
+ cfg := config.GetDefaultLocal()
+ cfg.Archival = true
+ cfg.MaxAcctLookback = uint64(b.N) // prevent committing blocks into DB since we benchmark validation
+ l0, err := OpenLedger(logger, dbPrefix, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+
+ // open second ledger
+ cfg.Archival = false
+ dbName = fmt.Sprintf("%s.%d.2", name, crypto.RandUint64())
+ dbPrefix = filepath.Join(dbTempDir, dbName)
+ l1, err := OpenLedger(logger, dbPrefix, inMem, genesisInitState, cfg)
+ require.NoError(b, err)
+
+ // init the first block
+ blk := genesisInitState.Block
+ blk.BlockHeader.Round++
+ blk.BlockHeader.TimeStamp += int64(crypto.RandUint64() % 100 * 1000)
+ blk.BlockHeader.GenesisID = fmt.Sprintf("%s-genesis", b.Name())
+ cert := agreement.Certificate{}
+
+ err = l0.AddBlock(blk, cert)
+ require.NoError(b, err)
+ err = l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+
+ newBlk := bookkeeping.MakeBlock(blk.BlockHeader)
+ eval, err := l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ require.NoError(b, err)
+
+ bc = &benchConfig{
+ txnCount: 0,
+ round: 1,
+ b: b,
+ creator: creator,
+ accts: accts,
+ acctToAst: acctToAst,
+ acctToApp: acctToApp,
+ l0: l0,
+ l1: l1,
+ eval: eval,
+ }
+
+ // start the ledger with a pool of accounts
+ for i := 0; i < numAccts; i++ {
+ acct := addNewAccount(bc)
+ payTo(bc, bc.creator, acct, 1234567890000)
+ }
+
+ addBlock(bc)
+ vc := verify.GetMockedCache(true)
+ for _, blk := range bc.blocks {
+ _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ require.NoError(b, err)
+ err = bc.l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+ }
+ bc.blocks = bc.blocks[len(bc.blocks):]
+ bc.txnCount = 0
+ bc.round = 0
+ bc.numPay = 0
+ return bc
+}
+
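+// sendAssetEvent transfers one unit of an asset between two random
+// accounts, creating the asset and opting the receiver in first when
+// needed; when newAccount is true the receiver is a freshly funded account.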
+func sendAssetEvent(bc *benchConfig, newAccount bool) {
+
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ }
+
+ var assIdx basics.AssetIndex
+ for key, val := range bc.acctToAst[randAcct1] {
+ if val > 1 {
+ assIdx = key
+ break
+ }
+ }
+
+ if assIdx == 0 {
+ assIdx = createAssetForAcct(bc, randAcct1)
+ }
+
+ // opt in to the asset
+ if _, have := bc.acctToAst[randAcct2][assIdx]; !have {
+ sendAssetTo(bc, randAcct2, randAcct2, assIdx, 0)
+ }
+ sendAssetTo(bc, randAcct1, randAcct2, assIdx, 1)
+}
+
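+// appCallEvent has a random account call an app created by another random
+// account, creating the app and opting the caller in first when needed;
+// when newAccount is true the caller is a freshly funded account.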
+func appCallEvent(bc *benchConfig, newAccount bool) {
+
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ }
+
+ var appIdx basics.AppIndex
+ if len(bc.acctToApp) > 0 {
+ randApp := mrand.Intn(len(bc.acctToApp))
+ a := 0
+ for key := range bc.acctToApp[randAcct1] {
+ if a == randApp {
+ appIdx = key
+ break
+ }
+ a++
+ }
+ }
+
+ if appIdx == 0 {
+ appIdx = createAppForAcct(bc, randAcct1)
+ }
+
+ // opt in to the app
+ if _, have := bc.acctToApp[randAcct2][appIdx]; !have {
+ optInApp(bc, randAcct2, appIdx)
+ }
+ callApp(bc, randAcct2, appIdx)
+}
+
+func payEvent(bc *benchConfig, newAccount bool) {
+ // pick a random account
+ randAcct1 := bc.accts[mrand.Intn(len(bc.accts))]
+ randAcct2 := bc.accts[mrand.Intn(len(bc.accts))]
+ if newAccount {
+ randAcct2 = addNewAccount(bc)
+ payTo(bc, bc.creator, randAcct2, 100000000)
+ } else {
+ payTo(bc, randAcct1, randAcct2, 10)
+ }
+}
+
+func sendAssetTo(bc *benchConfig, from, to basics.Address, assIdx basics.AssetIndex, amt uint64) {
+ tx := sendAssetTransaction(bc.txnCount, bc.round, from, to, assIdx, amt)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numAst++
+}
+
+func payTo(bc *benchConfig, from, to basics.Address, amt uint64) {
+ tx := createPaymentTransaction(uint64(bc.txnCount), bc.round, from, to, amt)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numPay++
+}
+
+func createAssetForAcct(bc *benchConfig, acct basics.Address) (aidx basics.AssetIndex) {
+ tx := createAssetTransaction(bc.txnCount, bc.round, acct)
+ stxn := transactions.SignedTxn{Txn: tx, Sig: crypto.Signature{1}}
+ aIdx := basics.AssetIndex(addTransaction(bc, stxn))
+ if len(bc.acctToAst[acct]) == 0 {
+ bc.acctToAst[acct] = make(map[basics.AssetIndex]uint64)
+ }
+ bc.acctToAst[acct][aIdx] = 3000000
+ bc.numAst++
+ return aIdx
+}
+
+func createAppForAcct(bc *benchConfig, acct basics.Address) (appIdx basics.AppIndex) {
+ tx, err := makeAppTransaction(bc.txnCount, bc.round, acct)
+ require.NoError(bc.b, err)
+ stxn := transactions.SignedTxn{Txn: tx, Sig: crypto.Signature{1}}
+ appIdx = basics.AppIndex(addTransaction(bc, stxn))
+ if len(bc.acctToApp[acct]) == 0 {
+ bc.acctToApp[acct] = make(map[basics.AppIndex]struct{})
+ }
+ bc.acctToApp[acct][appIdx] = struct{}{}
+ bc.numApp++
+ return appIdx
+}
+
+func optInApp(bc *benchConfig, acct basics.Address, appIdx basics.AppIndex) {
+ tx := makeOptInAppTransaction(bc.txnCount, appIdx, bc.round, acct)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numApp++
+}
+
+func callApp(bc *benchConfig, acct basics.Address, appIdx basics.AppIndex) {
+ tx := callAppTransaction(bc.txnCount, appIdx, bc.round, acct)
+ var stxn transactions.SignedTxn
+ stxn.Txn = tx
+ stxn.Sig = crypto.Signature{1}
+ addTransaction(bc, stxn)
+ bc.numApp++
+}
+
+func addNewAccount(bc *benchConfig) (acct basics.Address) {
+
+ acct = basics.Address{}
+ _, err := rand.Read(acct[:])
+ require.NoError(bc.b, err)
+ bc.accts = append(bc.accts, acct)
+ return acct
+}
+
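+// addTransaction feeds stxn into the current block evaluator; when the
+// block is full (ledgercore.ErrNoSpace) it seals the block via addBlock
+// and retries in the fresh evaluator. The returned txn counter serves as
+// the newly created asset or app ID for callers that create one.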
+func addTransaction(bc *benchConfig, stxn transactions.SignedTxn) uint64 {
+ err := bc.eval.Transaction(stxn, transactions.ApplyData{})
+ if err == ledgercore.ErrNoSpace {
+ addBlock(bc)
+ addTransaction(bc, stxn)
+ } else {
+ require.NoError(bc.b, err)
+ bc.txnCount++
+ }
+ return bc.eval.TestingTxnCounter()
+}
+
+func addBlock(bc *benchConfig) {
+ vblk, err := bc.eval.GenerateBlock()
+ cert := agreement.Certificate{}
+ require.NoError(bc.b, err)
+ bc.blocks = append(bc.blocks, vblk.Block())
+
+ err = bc.l0.AddBlock(vblk.Block(), cert)
+ require.NoError(bc.b, err)
+
+ _, last := bc.l0.LatestCommitted()
+ prev, err := bc.l0.BlockHdr(basics.Round(last))
+ require.NoError(bc.b, err)
+ newBlk := bookkeeping.MakeBlock(prev)
+ bc.eval, err = bc.l0.StartEvaluator(newBlk.BlockHeader, 5000, 0)
+ bc.round++
+ require.NoError(bc.b, err)
+}
+
+// BenchmarkBlockValidationJustPayNoNew sends payment transactions between existing accounts,
+// by choosing pairs of random accounts.
+func BenchmarkBlockValidationJustPayNoNew(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.0
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 1.0
+ astProb := 0.0
+ //appsProb := 0
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationJustPay sends payments between two random accounts, with
+// 50% probability of creating a new account.
+func BenchmarkBlockValidationJustPay(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.5
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 1.0
+ astProb := 0.0
+ //appsProb := 0
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationNoNew executes payment, asset or application events with
+// 30%, 50%, and 20% probability respectively among existing accounts.
+// Note that each event may involve multiple transactions (e.g. opt in to asset,
+// create app, opt in to app).
+func BenchmarkBlockValidationNoNew(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.0
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 0.3
+ astProb := 0.5
+ //appsProb := 0.2
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
+// BenchmarkBlockValidationMix executes payment, asset or application events with
+// 30%, 50%, and 20% probability respectively among existing or new accounts.
+// Note that each event may involve multiple transactions (e.g. funding new account,
+// opt in to asset, create app, opt in to app).
+func BenchmarkBlockValidationMix(b *testing.B) {
+ numAccts := 50000
+ newAcctProb := 0.5
+
+ // Set the event probabilities as fractions; the remainder goes to app calls
+ payProb := 0.3
+ astProb := 0.5
+ //appsProb := 0.2
+ benchmarkBlockValidationMix(b, newAcctProb, payProb, astProb, numAccts)
+}
+
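+// benchmarkBlockValidationMix prepares b.N blocks of the requested event
+// mix on the first ledger, then measures how long the second ledger takes
+// to re-validate (internal.Eval) and add those same blocks.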
+func benchmarkBlockValidationMix(b *testing.B, newAcctProb, payProb, astProb float64, numAccts int) {
+ bc := setupEnv(b, numAccts)
+
+ numBlocks := uint64(b.N)
+ cert := agreement.Certificate{}
+ fmt.Printf("Preparing... /%d: ", numBlocks)
+ s3 := time.Now()
+
+ for bc.round < numBlocks {
+ currentRound := bc.round
+ for bc.round == currentRound {
+ randNum := mrand.Float64()
+ if randNum < payProb {
+ // add pay transaction
+ payEvent(bc, mrand.Float64() < newAcctProb)
+ } else if randNum < payProb+astProb {
+ // add asset transactions
+ sendAssetEvent(bc, mrand.Float64() < newAcctProb)
+ } else {
+ // add app transaction
+ appCallEvent(bc, mrand.Float64() < newAcctProb)
+ }
+ }
+ if (currentRound+1)*10%(2*numBlocks) == 0 {
+ fmt.Printf("%d%% %.1fs ", (currentRound+1)*100/numBlocks, time.Since(s3).Seconds())
+ s3 = time.Now()
+ }
+
+ }
+ fmt.Printf("\nSummary %d blocks and %d txns: pay %d/blk (%d%%) assets %d/blk (%d%%) apps %d/blk (%d%%)\n",
+ numBlocks, bc.txnCount, bc.numPay/numBlocks, bc.numPay*100/bc.txnCount, bc.numAst/numBlocks, bc.numAst*100/bc.txnCount, bc.numApp/numBlocks, bc.numApp*100/bc.txnCount)
+
+ // eval + add all the (valid) blocks to the second ledger, measuring it this time
+ vc := verify.GetMockedCache(true)
+ tt := time.Now()
+ b.ResetTimer()
+ for _, blk := range bc.blocks {
+ _, err := internal.Eval(context.Background(), bc.l1, blk, true, vc, nil)
+ require.NoError(b, err)
+ err = bc.l1.AddBlock(blk, cert)
+ require.NoError(b, err)
+ }
+ fmt.Printf("%.1f sec / %d blks\n", time.Since(tt).Seconds(), numBlocks)
+}
+
+func createPaymentTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address,
+ receiver basics.Address,
+ amount uint64) (txn transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ txn = transactions.Transaction{
+ Type: protocol.PaymentTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ PaymentTxnFields: transactions.PaymentTxnFields{
+ Receiver: receiver,
+ Amount: basics.MicroAlgos{Raw: amount},
+ },
+ }
+ return
+}
+
+// prepares an asset creation transaction
+func createAssetTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address) (assetTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ assetTx = transactions.Transaction{
+ Type: protocol.AssetConfigTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ AssetConfigTxnFields: transactions.AssetConfigTxnFields{
+ AssetParams: basics.AssetParams{
+ Total: 3000000,
+ DefaultFrozen: false,
+ Manager: sender,
+ },
+ },
+ }
+ return
+}
+
+// prepares an asset transfer transaction
+func sendAssetTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address,
+ receiver basics.Address,
+ assetID basics.AssetIndex,
+ amt uint64) (tx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+ tx = transactions.Transaction{
+ Type: protocol.AssetTransferTx,
+ Header: transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ },
+ AssetTransferTxnFields: transactions.AssetTransferTxnFields{
+ XferAsset: assetID,
+ AssetAmount: amt,
+ AssetReceiver: receiver,
+ },
+ }
+ return
+}
+
+func makeAppTransaction(
+ counter uint64,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction, err error) {
+
+ progCounter := counter
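+ // the approval program adds the txn counter value to a global counter
+ // and increments a per-account local counter, requiring the caller to
+ // have opted in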
+ prog := fmt.Sprintf(`#pragma version 2
+// a simple global and local calls counter app
+byte b64 Y291bnRlcg== // counter
+dup
+app_global_get
+int %d
++
+app_global_put // update the counter
+int 0
+int 0
+app_opted_in
+bnz opted_in
+err
+opted_in:
+int 0 // account idx for app_local_put
+byte b64 Y291bnRlcg== // counter
+int 0
+byte b64 Y291bnRlcg==
+app_local_get
+int 1 // increment
++
+app_local_put
+int 1
+`, progCounter)
+
+ approvalOps, err := logic.AssembleString(prog)
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+ clearstateOps, err := logic.AssembleString("#pragma version 2\nint 1")
+ if err != nil {
+ return transactions.Transaction{}, err
+ }
+ schema := basics.StateSchema{
+ NumUint: 1,
+ }
+
+ // create the app
+ appTx = transactions.Transaction{}
+ appTx.Type = protocol.ApplicationCallTx
+ appTx.OnCompletion = transactions.OptInOC
+ appTx.ApprovalProgram = approvalOps.Program
+ appTx.ClearStateProgram = clearstateOps.Program
+ appTx.GlobalStateSchema = schema
+ appTx.LocalStateSchema = schema
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
+
+// prepares an app opt-in transaction
+func makeOptInAppTransaction(
+ counter uint64,
+ appIdx basics.AppIndex,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx = transactions.Transaction{}
+ appTx.ApplicationID = appIdx
+ appTx.OnCompletion = transactions.OptInOC
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
+
+// prepares an app call transaction
+func callAppTransaction(
+ counter uint64,
+ appIdx basics.AppIndex,
+ round uint64,
+ sender basics.Address) (appTx transactions.Transaction) {
+
+ note := make([]byte, 8)
+ binary.LittleEndian.PutUint64(note, counter)
+
+ appTx = transactions.Transaction{}
+ appTx.ApplicationID = appIdx
+ appTx.OnCompletion = transactions.NoOpOC
+
+ appTx.Header = transactions.Header{
+ Sender: sender,
+ Fee: basics.MicroAlgos{Raw: config.Consensus[protocol.ConsensusCurrentVersion].MinTxnFee},
+ FirstValid: basics.Round(round),
+ LastValid: basics.Round(round + 1000),
+ GenesisHash: crypto.Digest{1},
+ Note: note,
+ }
+ appTx.Type = protocol.ApplicationCallTx
+ return
+}
diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go
index 962e1419c6..86a4a7feab 100644
--- a/ledger/internal/apptxn_test.go
+++ b/ledger/internal/apptxn_test.go
@@ -2479,7 +2479,7 @@ func TestInnerClearState(t *testing.T) {
eval := nextBlock(t, l)
txn(t, l, eval, &inner)
vb := endBlock(t, l, eval)
- innerId := vb.Block().Payset[0].ApplicationID
+ innerID := vb.Block().Payset[0].ApplicationID
// Outer is a simple app that will invoke the given app (in ForeignApps[0])
// with the given OnCompletion (in ApplicationArgs[0]). Goal is to use it
@@ -2498,33 +2498,33 @@ itxn_begin
itxn_field OnCompletion
itxn_submit
`),
- ForeignApps: []basics.AppIndex{innerId},
+ ForeignApps: []basics.AppIndex{innerID},
}
eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
- outerId := vb.Block().Payset[0].ApplicationID
+ outerID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: outerId.Address(),
+ Receiver: outerID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: outerId,
+ ApplicationID: outerID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{innerId},
+ ForeignApps: []basics.AppIndex{innerID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- outerAcct := lookup(t, l, outerId.Address())
+ outerAcct := lookup(t, l, outerID.Address())
require.Len(t, outerAcct.AppLocalStates, 1)
require.Equal(t, outerAcct.TotalAppSchema, basics.StateSchema{
NumUint: 2,
@@ -2536,7 +2536,7 @@ itxn_submit
txn(t, l, eval, &call)
endBlock(t, l, eval)
- outerAcct = lookup(t, l, outerId.Address())
+ outerAcct = lookup(t, l, outerID.Address())
require.Empty(t, outerAcct.AppLocalStates)
require.Empty(t, outerAcct.TotalAppSchema)
@@ -2567,7 +2567,7 @@ b top
eval := nextBlock(t, l)
txn(t, l, eval, &badCallee)
vb := endBlock(t, l, eval)
- badId := vb.Block().Payset[0].ApplicationID
+ badID := vb.Block().Payset[0].ApplicationID
// Outer is a simple app that will invoke the given app (in ForeignApps[0])
// with the given OnCompletion (in ApplicationArgs[0]). Goal is to use it
@@ -2603,33 +2603,33 @@ bnz skip // Don't do budget checking during optin
assert
skip:
`),
- ForeignApps: []basics.AppIndex{badId},
+ ForeignApps: []basics.AppIndex{badID},
}
eval = nextBlock(t, l)
txn(t, l, eval, &outer)
vb = endBlock(t, l, eval)
- outerId := vb.Block().Payset[0].ApplicationID
+ outerID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: outerId.Address(),
+ Receiver: outerID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: outerId,
+ ApplicationID: outerID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{badId},
+ ForeignApps: []basics.AppIndex{badID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- outerAcct := lookup(t, l, outerId.Address())
+ outerAcct := lookup(t, l, outerID.Address())
require.Len(t, outerAcct.AppLocalStates, 1)
// When doing a clear state, `call` checks that budget wasn't stolen
@@ -2639,7 +2639,7 @@ skip:
endBlock(t, l, eval)
// Clearstate took effect, despite failure from infinite loop
- outerAcct = lookup(t, l, outerId.Address())
+ outerAcct = lookup(t, l, outerID.Address())
require.Empty(t, outerAcct.AppLocalStates)
}
@@ -2697,8 +2697,8 @@ log
eval := nextBlock(t, l)
txns(t, l, eval, &inner, &waster)
vb := endBlock(t, l, eval)
- innerId := vb.Block().Payset[0].ApplicationID
- wasterId := vb.Block().Payset[1].ApplicationID
+ innerID := vb.Block().Payset[0].ApplicationID
+ wasterID := vb.Block().Payset[1].ApplicationID
// Grouper is a simple app that will invoke the given apps (in
// ForeignApps[0,1]) as a group, with the given OnCompletion (in
@@ -2730,27 +2730,27 @@ itxn_submit
eval = nextBlock(t, l)
txn(t, l, eval, &grouper)
vb = endBlock(t, l, eval)
- grouperId := vb.Block().Payset[0].ApplicationID
+ grouperID := vb.Block().Payset[0].ApplicationID
fund := txntest.Txn{
Type: "pay",
Sender: addrs[0],
- Receiver: grouperId.Address(),
+ Receiver: grouperID.Address(),
Amount: 1_000_000,
}
call := txntest.Txn{
Type: "appl",
Sender: addrs[0],
- ApplicationID: grouperId,
+ ApplicationID: grouperID,
ApplicationArgs: [][]byte{{byte(transactions.OptInOC)}, {byte(transactions.OptInOC)}},
- ForeignApps: []basics.AppIndex{wasterId, innerId},
+ ForeignApps: []basics.AppIndex{wasterID, innerID},
}
eval = nextBlock(t, l)
txns(t, l, eval, &fund, &call)
endBlock(t, l, eval)
- gAcct := lookup(t, l, grouperId.Address())
+ gAcct := lookup(t, l, grouperID.Address())
require.Len(t, gAcct.AppLocalStates, 2)
call.ApplicationArgs = [][]byte{{byte(transactions.CloseOutOC)}, {byte(transactions.ClearStateOC)}}
@@ -2760,7 +2760,7 @@ itxn_submit
require.Len(t, vb.Block().Payset, 0)
// Clearstate did not take effect, since the caller tried to shortchange the CSP
- gAcct = lookup(t, l, grouperId.Address())
+ gAcct = lookup(t, l, grouperID.Address())
require.Len(t, gAcct.AppLocalStates, 2)
}
@@ -3310,14 +3310,9 @@ func TestReloadWithTxns(t *testing.T) {
dl.fullBlock() // So that the `block` opcode has a block to inspect
lookHdr := txntest.Txn{
- Type: "appl",
- Sender: addrs[0],
- ApprovalProgram: `
- txn FirstValid
- int 1
- -
- block BlkTimestamp
-`,
+ Type: "appl",
+ Sender: addrs[0],
+ ApprovalProgram: "txn FirstValid; int 1; -; block BlkTimestamp",
}
dl.fullBlock(&lookHdr)
diff --git a/ledger/internal/eval.go b/ledger/internal/eval.go
index 1944c4a69a..f2750d8a08 100644
--- a/ledger/internal/eval.go
+++ b/ledger/internal/eval.go
@@ -1476,7 +1476,7 @@ func (validator *evalTxValidator) run() {
unverifiedTxnGroups = append(unverifiedTxnGroups, signedTxnGroup)
}
- unverifiedTxnGroups = validator.txcache.GetUnverifiedTranscationGroups(unverifiedTxnGroups, specialAddresses, validator.block.BlockHeader.CurrentProtocol)
+ unverifiedTxnGroups = validator.txcache.GetUnverifiedTransactionGroups(unverifiedTxnGroups, specialAddresses, validator.block.BlockHeader.CurrentProtocol)
err := verify.PaysetGroups(validator.ctx, unverifiedTxnGroups, validator.block.BlockHeader, validator.verificationPool, validator.txcache, validator.ledger)
if err != nil {
diff --git a/ledger/internal/eval_blackbox_test.go b/ledger/internal/eval_blackbox_test.go
index a34dd2f315..b6eb6b9c16 100644
--- a/ledger/internal/eval_blackbox_test.go
+++ b/ledger/internal/eval_blackbox_test.go
@@ -533,6 +533,16 @@ func endBlock(t testing.TB, ledger *ledger.Ledger, eval *internal.BlockEvaluator
require.NoError(t, err)
err = ledger.AddValidatedBlock(*validatedBlock, agreement.Certificate{})
require.NoError(t, err)
+ // `rndBQ` gives the latest known block round added to the ledger.
+ // We wait until round `rndBQ` is committed to the blockQueue to avoid
+ // a data race noted in
+ // https://github.com/algorand/go-algorand/issues/4349:
+ // writing to `callTxnGroup` after `dl.fullBlock` raced with the async
+ // goroutine `go bq.syncer()`, which reads `callTxnGroup`.
+ // Waiting here until all newly added blocks are committed avoids the race.
+ rndBQ := ledger.Latest()
+ ledger.WaitForCommit(rndBQ)
return validatedBlock
}
@@ -1037,13 +1047,13 @@ func TestLogsInBlock(t *testing.T) {
}
vb := dl.fullBlock(&createTxn)
createInBlock := vb.Block().Payset[0]
- appId := createInBlock.ApplyData.ApplicationID
+ appID := createInBlock.ApplyData.ApplicationID
require.Equal(t, "APP", createInBlock.ApplyData.EvalDelta.Logs[0])
optInTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[1],
- ApplicationID: appId,
+ ApplicationID: appID,
OnCompletion: transactions.OptInOC,
}
vb = dl.fullBlock(&optInTxn)
@@ -1053,7 +1063,7 @@ func TestLogsInBlock(t *testing.T) {
clearTxn := txntest.Txn{
Type: protocol.ApplicationCallTx,
Sender: addrs[1],
- ApplicationID: appId,
+ ApplicationID: appID,
OnCompletion: transactions.ClearStateOC,
}
vb = dl.fullBlock(&clearTxn)
@@ -1092,7 +1102,7 @@ func TestUnfundedSenders(t *testing.T) {
ghost := basics.Address{0x01}
- asa_create := txntest.Txn{
+ asaCreate := txntest.Txn{
Type: "acfg",
Sender: addrs[0],
AssetParams: basics.AssetParams{
@@ -1103,12 +1113,12 @@ func TestUnfundedSenders(t *testing.T) {
},
}
- app_create := txntest.Txn{
+ appCreate := txntest.Txn{
Type: "appl",
Sender: addrs[0],
}
- dl.fullBlock(&asa_create, &app_create)
+ dl.fullBlock(&asaCreate, &appCreate)
// Advance so that rewardsLevel increases
for i := 1; i < 10; i++ {
@@ -1220,7 +1230,7 @@ func TestAppCallAppDuringInit(t *testing.T) {
dl.fullBlock()
}
- call_in_init := txntest.Txn{
+ callInInit := txntest.Txn{
Type: "appl",
Sender: addrs[0],
ApprovalProgram: `
@@ -1241,6 +1251,6 @@ func TestAppCallAppDuringInit(t *testing.T) {
// In the old days, balances.Move would try to increase the rewardsState on the unfunded account
problem = "balance 0 below min"
}
- dl.txn(&call_in_init, problem)
+ dl.txn(&callInInit, problem)
})
}
diff --git a/ledger/ledger_test.go b/ledger/ledger_test.go
index 469f681b97..95a0a84e22 100644
--- a/ledger/ledger_test.go
+++ b/ledger/ledger_test.go
@@ -1388,6 +1388,7 @@ func TestLedgerBlockHdrCaching(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
a.NoError(err)
defer l.Close()
@@ -1484,6 +1485,7 @@ func TestLedgerReload(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = true
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -1682,11 +1684,13 @@ func TestLedgerKeepsOldBlocksForStateProof(t *testing.T) {
cfg := config.GetDefaultLocal()
cfg.Archival = false
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
lastBlock, err := l.Block(l.Latest())
+ require.NoError(t, err)
proto := config.Consensus[lastBlock.CurrentProtocol]
accounts := make(map[basics.Address]basics.AccountData, len(genesisInitState.Accounts)+maxBlocks)
keys := make(map[basics.Address]*crypto.SignatureSecrets, len(initKeys)+maxBlocks)
@@ -2849,6 +2853,7 @@ func TestVotersReloadFromDisk(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -2896,6 +2901,7 @@ func TestVotersReloadFromDiskAfterOneStateProofCommitted(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
@@ -2955,6 +2961,7 @@ func TestVotersReloadFromDiskPassRecoveryPeriod(t *testing.T) {
cfg.Archival = false
cfg.MaxAcctLookback = proto.StateProofInterval - proto.StateProofVotersLookback - 10
log := logging.TestingLog(t)
+ log.SetLevel(logging.Info)
l, err := OpenLedger(log, dbName, inMem, genesisInitState, cfg)
require.NoError(t, err)
defer l.Close()
diff --git a/ledger/msgp_gen.go b/ledger/msgp_gen.go
index 58824b2ae0..d76a3a0db0 100644
--- a/ledger/msgp_gen.go
+++ b/ledger/msgp_gen.go
@@ -1768,7 +1768,7 @@ func (z *catchpointFileBalancesChunkV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
zb0002Len := uint32(1)
- var zb0002Mask uint8 /* 2 bits */
+ var zb0002Mask uint8 /* 3 bits */
if len((*z).Balances) == 0 {
zb0002Len--
zb0002Mask |= 0x2
@@ -2298,8 +2298,8 @@ func (z *encodedBalanceRecordV5) MsgIsZero() bool {
func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
o = msgp.Require(b, z.Msgsize())
// omitempty: check for empty values
- zb0003Len := uint32(3)
- var zb0003Mask uint8 /* 4 bits */
+ zb0003Len := uint32(4)
+ var zb0003Mask uint8 /* 5 bits */
if (*z).Address.MsgIsZero() {
zb0003Len--
zb0003Mask |= 0x2
@@ -2312,6 +2312,10 @@ func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
zb0003Len--
zb0003Mask |= 0x8
}
+ if (*z).ExpectingMoreEntries == false {
+ zb0003Len--
+ zb0003Mask |= 0x10
+ }
// variable map header, size zb0003Len
o = append(o, 0x80|uint8(zb0003Len))
if zb0003Len != 0 {
@@ -2345,6 +2349,11 @@ func (z *encodedBalanceRecordV6) MarshalMsg(b []byte) (o []byte) {
o = zb0002.MarshalMsg(o)
}
}
+ if (zb0003Mask & 0x10) == 0 { // if not empty
+ // string "e"
+ o = append(o, 0xa1, 0x65)
+ o = msgp.AppendBool(o, (*z).ExpectingMoreEntries)
+ }
}
return
}
@@ -2419,6 +2428,14 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
(*z).Resources[zb0001] = zb0002
}
}
+ if zb0003 > 0 {
+ zb0003--
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "struct-from-array", "ExpectingMoreEntries")
+ return
+ }
+ }
if zb0003 > 0 {
err = msgp.ErrTooManyArrayFields(zb0003)
if err != nil {
@@ -2488,6 +2505,12 @@ func (z *encodedBalanceRecordV6) UnmarshalMsg(bts []byte) (o []byte, err error)
}
(*z).Resources[zb0001] = zb0002
}
+ case "e":
+ (*z).ExpectingMoreEntries, bts, err = msgp.ReadBoolBytes(bts)
+ if err != nil {
+ err = msgp.WrapError(err, "ExpectingMoreEntries")
+ return
+ }
default:
err = msgp.ErrNoField(string(field))
if err != nil {
@@ -2516,12 +2539,13 @@ func (z *encodedBalanceRecordV6) Msgsize() (s int) {
s += 0 + msgp.Uint64Size + zb0002.Msgsize()
}
}
+ s += 2 + msgp.BoolSize
return
}
// MsgIsZero returns whether this is a zero value
func (z *encodedBalanceRecordV6) MsgIsZero() bool {
- return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0)
+ return ((*z).Address.MsgIsZero()) && ((*z).AccountData.MsgIsZero()) && (len((*z).Resources) == 0) && ((*z).ExpectingMoreEntries == false)
}
// MarshalMsg implements msgp.Marshaler
diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go
index 0f20579a89..cea52d434c 100644
--- a/ledger/testing/randomAccounts.go
+++ b/ledger/testing/randomAccounts.go
@@ -58,7 +58,7 @@ func RandomAccountData(rewardsBase uint64) basics.AccountData {
switch crypto.RandUint64() % 3 {
case 0:
data.Status = basics.Online
- data.VoteLastValid = 1000
+ data.VoteLastValid = 10000
case 1:
data.Status = basics.Offline
data.VoteLastValid = 0
diff --git a/ledger/voters_test.go b/ledger/voters_test.go
index b13b11d971..78e9eb7b87 100644
--- a/ledger/voters_test.go
+++ b/ledger/voters_test.go
@@ -20,6 +20,7 @@ import (
"testing"
"github.com/algorand/go-algorand/config"
+ "github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/bookkeeping"
@@ -30,12 +31,46 @@ import (
"github.com/stretchr/testify/require"
)
-func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts) {
+func addBlockToAccountsUpdate(blk bookkeeping.Block, ao *onlineAccounts, totals ledgercore.AccountTotals) {
updates := ledgercore.MakeAccountDeltas(1)
delta := ledgercore.MakeStateDelta(&blk.BlockHeader, 0, updates.Len(), 0)
+ delta.Accts.MergeAccounts(updates)
+ delta.Totals = totals
ao.newBlock(blk, delta)
}
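+// checkVoters waits for every cached votersForRound to finish loading and
+// asserts it carries a nonzero total weight, participant list, and tree.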
+func checkVoters(a *require.Assertions, ao *onlineAccounts, expectedSize uint64) {
+ a.Equal(expectedSize, uint64(len(ao.voters.votersForRoundCache)))
+ for _, v := range ao.voters.votersForRoundCache {
+ err := v.Wait()
+ a.NoError(err)
+ a.NotZero(v.TotalWeight)
+ a.NotZero(len(v.Participants))
+ a.NotZero(v.Tree.NumOfElements)
+ }
+}
+
+func makeRandomOnlineAccounts(numberOfAccounts uint64) map[basics.Address]basics.AccountData {
+ res := make(map[basics.Address]basics.AccountData)
+
+ for i := uint64(0); i < numberOfAccounts; i++ {
+ var data basics.AccountData
+
+ // Avoid overflowing totals
+ data.MicroAlgos.Raw = crypto.RandUint64() % (1 << 32)
+
+ data.Status = basics.Online
+ data.VoteLastValid = 10000000
+
+ data.VoteFirstValid = 0
+ data.RewardsBase = 0
+
+ res[ledgertesting.RandomAddress()] = data
+ }
+
+ return res
+}
+
func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
partitiontest.PartitionTest(t)
a := require.New(t)
@@ -44,7 +79,7 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
numOfIntervals := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals - 1
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -64,15 +99,18 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
// adding blocks to the voterstracker (in order to pass the numOfIntervals*stateproofInterval we add 1)
for ; i < (numOfIntervals*intervalForTest)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(numOfIntervals, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, numOfIntervals)
a.Equal(basics.Round(intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
block := randomBlock(basics.Round(i))
@@ -84,13 +122,13 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
stateTracking.StateProofNextRound = basics.Round((numOfIntervals - 1) * intervalForTest)
block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
// the tracker should have 3 entries
// - voters to confirm the numOfIntervals - 1 th interval
// - voters to confirm the numOfIntervals th interval
// - voters to confirm the numOfIntervals + 1 th interval
- a.Equal(uint64(3), uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, 3)
a.Equal(basics.Round((numOfIntervals-2)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
block = randomBlock(basics.Round(i))
@@ -98,9 +136,9 @@ func TestVoterTrackerDeleteVotersAfterStateproofConfirmed(t *testing.T) {
stateTracking.StateProofNextRound = basics.Round(numOfIntervals * intervalForTest)
block.block.BlockHeader.StateProofTracking = make(map[protocol.StateProofType]bookkeeping.StateProofTrackingData)
block.block.BlockHeader.StateProofTracking[protocol.StateProofBasic] = stateTracking
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
- a.Equal(uint64(2), uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, 2)
a.Equal(basics.Round((numOfIntervals-1)*intervalForTest-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
}
@@ -112,7 +150,7 @@ func TestLimitVoterTracker(t *testing.T) {
recoveryIntervalForTests := config.Consensus[protocol.ConsensusCurrentVersion].StateProofMaxRecoveryIntervals
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -132,6 +170,9 @@ func TestLimitVoterTracker(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
// since the first state proof is expected to happen on stateproofInterval*2 we would start give-up on state proofs
@@ -141,33 +182,33 @@ func TestLimitVoterTracker(t *testing.T) {
for ; i < intervalForTest*(recoveryIntervalForTests+2); i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
// the votersForRoundCache should contains recoveryIntervalForTests+2 elements:
// recoveryIntervalForTests - since this is the recovery interval
// + 1 - since votersForRoundCache would contain the votersForRound for the next state proof to come
// + 1 - in order to confirm recoveryIntervalForTests number of state proofs we need recoveryIntervalForTests + 1 headers (for the commitment)
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// after adding the round intervalForTest*(recoveryIntervalForTests+3)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+3)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*2-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// after adding the round intervalForTest*(recoveryIntervalForTests+4)+1 we expect the voter tracker to remove voters
for ; i < intervalForTest*(recoveryIntervalForTests+4)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
// if the last round of the intervalForTest has not been added to the ledger the votersTracker would
@@ -175,17 +216,17 @@ func TestLimitVoterTracker(t *testing.T) {
for ; i < intervalForTest*(recoveryIntervalForTests+5); i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+3, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+3)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*3-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
for ; i < intervalForTest*(recoveryIntervalForTests+5)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
- a.Equal(recoveryIntervalForTests+2, uint64(len(ao.voters.votersForRoundCache)))
+ checkVoters(a, ao, recoveryIntervalForTests+2)
a.Equal(basics.Round(config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval*4-lookbackForTest), ao.voters.lowestRound(basics.Round(i)))
}
@@ -196,7 +237,7 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
intervalForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofInterval
lookbackForTest := config.Consensus[protocol.ConsensusCurrentVersion].StateProofVotersLookback
- accts := []map[basics.Address]basics.AccountData{ledgertesting.RandomAccounts(20, true)}
+ accts := []map[basics.Address]basics.AccountData{makeRandomOnlineAccounts(20)}
pooldata := basics.AccountData{}
pooldata.MicroAlgos.Raw = 1000 * 1000 * 1000 * 1000
@@ -216,11 +257,14 @@ func TestTopNAccountsThatHaveNoMssKeys(t *testing.T) {
defer au.close()
defer ao.close()
+ _, totals, err := au.LatestTotals()
+ require.NoError(t, err)
+
i := uint64(1)
for ; i < (intervalForTest)+1; i++ {
block := randomBlock(basics.Round(i))
block.block.CurrentProtocol = protocol.ConsensusCurrentVersion
- addBlockToAccountsUpdate(block.block, ao)
+ addBlockToAccountsUpdate(block.block, ao, totals)
}
top, err := ao.voters.getVoters(basics.Round(intervalForTest - lookbackForTest))
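The checkVoters helper introduced by this patch replaces the repeated cache-length assertions; its definition is not visible in this excerpt. Judging only from the call sites, a plausible shape is the sketch below — the onlineAccounts type name and the single assertion are assumptions, and the real helper may check more per entry:

// Hypothetical reconstruction for orientation; the real helper is defined
// elsewhere in this patch (assumes the test file's existing imports and types).
func checkVoters(a *require.Assertions, ao *onlineAccounts, expectedSize uint64) {
	a.Equal(expectedSize, uint64(len(ao.voters.votersForRoundCache)))
}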
diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go
index b19d379a31..3ec8cd45c6 100644
--- a/libgoal/libgoal.go
+++ b/libgoal/libgoal.go
@@ -20,7 +20,6 @@ import (
"encoding/json"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"time"
@@ -63,6 +62,10 @@ type Client struct {
consensus config.ConsensusProtocols
algodVersionAffinity algodclient.APIVersion
kmdVersionAffinity kmdclient.APIVersion
+
+ suggestedParamsCache v1.TransactionParams
+ suggestedParamsExpire time.Time
+ suggestedParamsMaxAge time.Duration
}
// ClientConfig is data to configure a Client
@@ -514,7 +517,7 @@ func (c *Client) signAndBroadcastTransactionWithWallet(walletHandle, pw []byte,
// M | M | error
//
func (c *Client) ComputeValidityRounds(firstValid, lastValid, validRounds uint64) (first, last, latest uint64, err error) {
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return 0, 0, 0, err
}
@@ -577,7 +580,7 @@ func (c *Client) ConstructPayment(from, to string, fee, amount uint64, note []by
}
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -921,6 +924,23 @@ func (c *Client) SuggestedParams() (params v1.TransactionParams, err error) {
return
}
+// SetSuggestedParamsCacheAge sets the maximum age of the internally cached SuggestedParams() result that many libgoal Client functions consult.
+func (c *Client) SetSuggestedParamsCacheAge(maxAge time.Duration) {
+ c.suggestedParamsMaxAge = maxAge
+}
+
+func (c *Client) cachedSuggestedParams() (params v1.TransactionParams, err error) {
+ if c.suggestedParamsMaxAge == 0 || time.Now().After(c.suggestedParamsExpire) {
+ params, err = c.SuggestedParams()
+ if err == nil && c.suggestedParamsMaxAge != 0 {
+ c.suggestedParamsCache = params
+ c.suggestedParamsExpire = time.Now().Add(c.suggestedParamsMaxAge)
+ }
+ return
+ }
+ return c.suggestedParamsCache, nil
+}
+
// GetPendingTransactions gets a snapshot of current pending transactions on the node.
// If maxTxns = 0, fetches as many transactions as possible.
func (c *Client) GetPendingTransactions(maxTxns uint64) (resp v1.PendingTransactions, err error) {
@@ -968,7 +988,7 @@ func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID s
// AddParticipationKey takes a participation key file and sends it to the node.
// The key will be loaded into the system when the function returns successfully.
func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) {
- data, err := ioutil.ReadFile(keyfile)
+ data, err := os.ReadFile(keyfile)
if err != nil {
return
}
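The new cachedSuggestedParams() above is an opt-in TTL cache: with suggestedParamsMaxAge left at its zero value, every call falls through to SuggestedParams(). Note the cache fields carry no locking, so this assumes the Client is not shared across goroutines. A minimal usage sketch — the MakeClient arguments are illustrative, not taken from this patch:

package main

import (
	"log"
	"time"

	"github.com/algorand/go-algorand/libgoal"
)

func main() {
	client, err := libgoal.MakeClient("/path/to/data", "/path/to/cache", libgoal.FullClient)
	if err != nil {
		log.Fatal(err)
	}
	// Opt in: bursts of transaction construction now reuse one
	// SuggestedParams() round-trip for up to five seconds.
	client.SetSuggestedParamsCacheAge(5 * time.Second)
	// Helpers such as ConstructPayment and FillUnsignedTxTemplate consult
	// cachedSuggestedParams() and only query the node after the entry expires.
}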
diff --git a/libgoal/lockedFile.go b/libgoal/lockedFile.go
index 3c827d870d..26b235d243 100644
--- a/libgoal/lockedFile.go
+++ b/libgoal/lockedFile.go
@@ -18,7 +18,7 @@ package libgoal
import (
"fmt"
- "io/ioutil"
+ "io"
"os"
)
@@ -72,7 +72,7 @@ func (f *lockedFile) read() (bytes []byte, err error) {
}
}()
- bytes, err = ioutil.ReadAll(fd)
+ bytes, err = io.ReadAll(fd)
return
}
diff --git a/libgoal/transactions.go b/libgoal/transactions.go
index c28fd0216a..fb788f0249 100644
--- a/libgoal/transactions.go
+++ b/libgoal/transactions.go
@@ -24,7 +24,7 @@ import (
"github.com/algorand/go-algorand/crypto"
"github.com/algorand/go-algorand/crypto/merklesignature"
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/data/account"
"github.com/algorand/go-algorand/data/basics"
"github.com/algorand/go-algorand/data/transactions"
@@ -253,7 +253,7 @@ func generateRegistrationTransaction(part generated.ParticipationKey, fee basics
func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) {
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -293,7 +293,7 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, f
}
// Get current round, protocol, genesis ID
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -350,7 +350,7 @@ func (c *Client) MakeUnsignedGoOfflineTx(address string, firstValid, lastValid,
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -405,7 +405,7 @@ func (c *Client) MakeUnsignedBecomeNonparticipatingTx(address string, firstValid
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -460,7 +460,7 @@ func (c *Client) FillUnsignedTxTemplate(sender string, firstValid, lastValid, fe
return transactions.Transaction{}, err
}
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
@@ -637,7 +637,7 @@ func (c *Client) MakeUnsignedAssetCreateTx(total uint64, defaultFrozen bool, man
}
// Get consensus params so we can get max field lengths
- params, err := c.SuggestedParams()
+ params, err := c.cachedSuggestedParams()
if err != nil {
return transactions.Transaction{}, err
}
diff --git a/logging/cyclicWriter_test.go b/logging/cyclicWriter_test.go
index d1eaa43b0b..5719be9303 100644
--- a/logging/cyclicWriter_test.go
+++ b/logging/cyclicWriter_test.go
@@ -17,7 +17,6 @@
package logging
import (
- "io/ioutil"
"os"
"testing"
@@ -49,12 +48,12 @@ func TestCyclicWrite(t *testing.T) {
require.NoError(t, err)
require.Equal(t, len(secondWrite), n)
- liveData, err := ioutil.ReadFile(liveFileName)
+ liveData, err := os.ReadFile(liveFileName)
require.NoError(t, err)
require.Len(t, liveData, len(secondWrite))
require.Equal(t, byte('B'), liveData[0])
- oldData, err := ioutil.ReadFile(archiveFileName)
+ oldData, err := os.ReadFile(archiveFileName)
require.NoError(t, err)
require.Len(t, oldData, space)
for i := 0; i < space; i++ {
diff --git a/logging/telemetryspec/event.go b/logging/telemetryspec/event.go
index 81d2283241..a759bb261d 100644
--- a/logging/telemetryspec/event.go
+++ b/logging/telemetryspec/event.go
@@ -29,6 +29,12 @@ type Event string
// StartupEvent event
const StartupEvent Event = "Startup"
+// NameValue defines a named value, for use in an array reported to telemetry.
+type NameValue struct {
+ Name string
+ Value interface{}
+}
+
// StartupEventDetails contains details for the StartupEvent
type StartupEventDetails struct {
Version string
@@ -36,6 +42,7 @@ type StartupEventDetails struct {
Branch string
Channel string
InstanceHash string
+ Overrides []NameValue
}
// HeartbeatEvent is sent periodically to indicate node is running
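Overrides gives StartupEvent a place to report which local config settings differ from their defaults, as name/value pairs. A hedged sketch of how a caller might populate it — the field values and the override list are illustrative, and the real override collection lives in config wiring outside this diff:

package main

import (
	"github.com/algorand/go-algorand/logging"
	"github.com/algorand/go-algorand/logging/telemetryspec"
)

func main() {
	// Illustrative values only; real details come from the node's config.
	details := telemetryspec.StartupEventDetails{
		Version:      "3.9.0",
		Channel:      "alpha",
		InstanceHash: "deadbeef",
		Overrides: []telemetryspec.NameValue{
			{Name: "BaseLoggerDebugLevel", Value: 5},
			{Name: "EnableProfiler", Value: true},
		},
	}
	logging.Base().EventWithDetails(telemetryspec.ApplicationState, telemetryspec.StartupEvent, details)
}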
diff --git a/netdeploy/network.go b/netdeploy/network.go
index f54eef4ccb..78a665b789 100644
--- a/netdeploy/network.go
+++ b/netdeploy/network.go
@@ -19,7 +19,6 @@ package netdeploy
import (
"encoding/json"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"sort"
@@ -233,7 +232,7 @@ func saveNetworkCfg(cfg NetworkCfg, configFile string) error {
func (n *Network) scanForNodes() error {
// Enumerate direct sub-directories of our root and look for valid node data directories (where genesis.json exists)
- entries, err := ioutil.ReadDir(n.rootDir)
+ entries, err := os.ReadDir(n.rootDir)
if err != nil {
return err
}
diff --git a/netdeploy/networkTemplate.go b/netdeploy/networkTemplate.go
index 293fbaa865..5456179287 100644
--- a/netdeploy/networkTemplate.go
+++ b/netdeploy/networkTemplate.go
@@ -20,7 +20,7 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
+ "io/fs"
"math/big"
"os"
"path/filepath"
@@ -107,8 +107,8 @@ func (t NetworkTemplate) createNodeDirectories(targetFolder string, binDir strin
return
}
- var files []os.FileInfo
- files, err = ioutil.ReadDir(targetFolder)
+ var files []fs.DirEntry
+ files, err = os.ReadDir(targetFolder)
if err != nil {
return
}
diff --git a/netdeploy/remote/deployedNetwork.go b/netdeploy/remote/deployedNetwork.go
index 3382edee04..23828d469a 100644
--- a/netdeploy/remote/deployedNetwork.go
+++ b/netdeploy/remote/deployedNetwork.go
@@ -19,7 +19,7 @@ package remote
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io/fs"
"math/rand"
"os"
"path/filepath"
@@ -128,7 +128,7 @@ func InitDeployedNetworkConfig(file string, buildConfig BuildConfig) (cfg Deploy
}
func loadAndProcessConfig(file string, buildConfig BuildConfig) (expanded string, err error) {
- raw, err := ioutil.ReadFile(file)
+ raw, err := os.ReadFile(file)
if err != nil {
return
}
@@ -287,7 +287,7 @@ func validateFilename(filename string) (err error) {
if strings.Index(filename, "*") >= 0 {
return ErrDeployedNetworkNameCantIncludeWildcard
}
- file, err := ioutil.TempFile("", filename)
+ file, err := os.CreateTemp("", filename)
if err == nil {
file.Close()
os.Remove(file.Name())
@@ -831,8 +831,8 @@ func (cfg DeployedNetwork) createHostFolders(targetFolder string, genesisFolder
}
func (cfg DeployedNetwork) copyWalletsToNodes(genesisFolder string, walletNameToDataMap map[string]walletTargetData) (err error) {
- var files []os.FileInfo
- files, err = ioutil.ReadDir(genesisFolder)
+ var files []fs.DirEntry
+ files, err = os.ReadDir(genesisFolder)
if err != nil {
return
}
diff --git a/netdeploy/remote/nodecfg/nodeDir.go b/netdeploy/remote/nodecfg/nodeDir.go
index a59b15c3a7..1136875457 100644
--- a/netdeploy/remote/nodecfg/nodeDir.go
+++ b/netdeploy/remote/nodecfg/nodeDir.go
@@ -19,7 +19,6 @@ package nodecfg
import (
"encoding/json"
"fmt"
- "io/ioutil"
"net/url"
"os"
"path/filepath"
@@ -174,9 +173,11 @@ func (nd *nodeDir) configureAPIToken(token string) (err error) {
return
}
fmt.Fprintf(os.Stdout, " - Assigning APIToken: %s\n", token)
- ioutil.WriteFile(filepath.Join(nd.dataDir, tokens.AlgodTokenFilename), []byte(token), 0600)
- err = nd.saveConfig()
- return
+ err = os.WriteFile(filepath.Join(nd.dataDir, tokens.AlgodTokenFilename), []byte(token), 0600)
+ if err != nil {
+ return err
+ }
+ return nd.saveConfig()
}
func (nd *nodeDir) configureTelemetry(enable bool) (err error) {
diff --git a/network/limitlistener/rejectingLimitListener_test.go b/network/limitlistener/rejectingLimitListener_test.go
index 7f286e13de..a3b955fc5e 100644
--- a/network/limitlistener/rejectingLimitListener_test.go
+++ b/network/limitlistener/rejectingLimitListener_test.go
@@ -8,7 +8,6 @@ import (
"errors"
"fmt"
"io"
- "io/ioutil"
"net"
"net/http"
"testing"
@@ -57,7 +56,7 @@ func TestRejectingLimitListenerBasic(t *testing.T) {
return
}
- io.Copy(ioutil.Discard, r.Body)
+ io.Copy(io.Discard, r.Body)
r.Body.Close()
queryCh <- nil
diff --git a/network/wsNetwork.go b/network/wsNetwork.go
index a7c874cdae..b7b2a5e893 100644
--- a/network/wsNetwork.go
+++ b/network/wsNetwork.go
@@ -22,7 +22,7 @@ import (
"encoding/base64"
"errors"
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"net/textproto"
@@ -201,7 +201,7 @@ type GossipNode interface {
// this node to send corresponding MsgOfInterest notifications to any
// newly connecting peers. This should be called before the network
// is started.
- RegisterMessageInterest(protocol.Tag) error
+ RegisterMessageInterest(protocol.Tag)
// SubstituteGenesisID substitutes the "{genesisID}" with their network-specific genesisID.
SubstituteGenesisID(rawURL string) string
@@ -2031,7 +2031,7 @@ func (wn *WebsocketNetwork) tryConnect(addr, gossipAddr string) {
if err == websocket.ErrBadHandshake {
// reading the body here is safe only because it came from DialContext above, which already finished reading all the data from the network
// and placed it all in an ioutil.NopCloser reader.
- bodyBytes, _ := ioutil.ReadAll(response.Body)
+ bodyBytes, _ := io.ReadAll(response.Body)
errString := string(bodyBytes)
if len(errString) > 128 {
errString = errString[:128]
@@ -2308,7 +2308,7 @@ func SetUserAgentHeader(header http.Header) {
// this node to send corresponding MsgOfInterest notifications to any
// newly connecting peers. This should be called before the network
// is started.
-func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
+func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) {
wn.messagesOfInterestMu.Lock()
defer wn.messagesOfInterestMu.Unlock()
@@ -2321,11 +2321,10 @@ func (wn *WebsocketNetwork) RegisterMessageInterest(t protocol.Tag) error {
wn.messagesOfInterest[t] = true
wn.updateMessagesOfInterestEnc()
- return nil
}
// DeregisterMessageInterest will tell peers to no longer send us traffic with a protocol Tag
-func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) error {
+func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) {
wn.messagesOfInterestMu.Lock()
defer wn.messagesOfInterestMu.Unlock()
@@ -2338,7 +2337,6 @@ func (wn *WebsocketNetwork) DeregisterMessageInterest(t protocol.Tag) error {
delete(wn.messagesOfInterest, t)
wn.updateMessagesOfInterestEnc()
- return nil
}
func (wn *WebsocketNetwork) updateMessagesOfInterestEnc() {
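Since RegisterMessageInterest and DeregisterMessageInterest can no longer fail, call sites shed their error plumbing; the ordering constraint from the doc comment still applies. A small sketch of the resulting call-site shape (the wrapper function is mine, not from the patch):

package main

import (
	"github.com/algorand/go-algorand/network"
	"github.com/algorand/go-algorand/protocol"
)

// startWithVoteInterest registers interest before the network starts,
// per the doc comment above; there is no longer an error to check.
func startWithVoteInterest(net *network.WebsocketNetwork) {
	net.RegisterMessageInterest(protocol.AgreementVoteTag)
	net.Start()
}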
diff --git a/network/wsNetwork_test.go b/network/wsNetwork_test.go
index 6f14e669f0..dc86511254 100644
--- a/network/wsNetwork_test.go
+++ b/network/wsNetwork_test.go
@@ -1467,7 +1467,7 @@ func TestSlowPeerDisconnection(t *testing.T) {
now := time.Now()
expire := now.Add(5 * time.Second)
for {
- time.Sleep(time.Millisecond)
+ time.Sleep(10 * time.Millisecond)
if len(peer.sendBufferHighPrio)+len(peer.sendBufferBulk) == 0 {
break
}
@@ -1838,7 +1838,7 @@ func TestWebsocketNetworkMessageOfInterest(t *testing.T) {
waitReady(t, netB, readyTimeout.C)
// have netB asking netA to send it only AgreementVoteTag and ProposalPayloadTag
- require.NoError(t, netB.RegisterMessageInterest(ft2))
+ netB.RegisterMessageInterest(ft2)
// send another message which we can track, so that we'll know that the first message was delivered.
netB.Broadcast(context.Background(), protocol.VoteBundleTag, []byte{0, 1, 2, 3, 4}, true, nil)
messageFilterArriveWg.Wait()
diff --git a/network/wsPeer_test.go b/network/wsPeer_test.go
index 550eb5fbda..800ab5b148 100644
--- a/network/wsPeer_test.go
+++ b/network/wsPeer_test.go
@@ -18,12 +18,14 @@ package network
import (
"encoding/binary"
+ "strings"
"testing"
"time"
"unsafe"
"github.com/algorand/go-algorand/logging"
"github.com/algorand/go-algorand/test/partitiontest"
+ "github.com/algorand/go-algorand/util/metrics"
"github.com/stretchr/testify/require"
)
@@ -90,10 +92,10 @@ func TestDefaultMessageTagsLength(t *testing.T) {
}
}
-// TestAtomicVariablesAligment ensures that the 64-bit atomic variables
+// TestAtomicVariablesAlignment ensures that the 64-bit atomic variables
// offsets are 64-bit aligned. This is required due to go atomic library
// limitation.
-func TestAtomicVariablesAligment(t *testing.T) {
+func TestAtomicVariablesAlignment(t *testing.T) {
partitiontest.PartitionTest(t)
p := wsPeer{}
@@ -101,3 +103,25 @@ func TestAtomicVariablesAligment(t *testing.T) {
require.True(t, (unsafe.Offsetof(p.lastPacketTime)%8) == 0)
require.True(t, (unsafe.Offsetof(p.intermittentOutgoingMessageEnqueueTime)%8) == 0)
}
+
+func TestTagCounterFiltering(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ tagCounterTags := map[string]*metrics.TagCounter{
+ "networkSentBytesByTag": networkSentBytesByTag,
+ "networkReceivedBytesByTag": networkReceivedBytesByTag,
+ "networkMessageReceivedByTag": networkMessageReceivedByTag,
+ "networkMessageSentByTag": networkMessageSentByTag,
+ }
+ for name, tag := range tagCounterTags {
+ t.Run(name, func(t *testing.T) {
+ require.NotZero(t, len(tag.AllowedTags))
+ tag.Add("TEST_TAG", 1)
+ b := strings.Builder{}
+ tag.WriteMetric(&b, "")
+ result := b.String()
+ require.Contains(t, result, "_UNK")
+ require.NotContains(t, result, "TEST_TAG")
+ })
+ }
+}
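The test above relies on TagCounter's allow-list behavior: a counter built with an allowed-tag set folds any unlisted tag into a synthetic unknown bucket, which is why "TEST_TAG" never appears and "_UNK" does. A sketch of that behavior — the NewTagCounterFiltered constructor name and signature are my assumption about util/metrics, so treat the details as approximate:

package main

import (
	"strings"

	"github.com/algorand/go-algorand/util/metrics"
)

func main() {
	// Assumed constructor; only Add and WriteMetric below are taken
	// directly from the test in this diff.
	tc := metrics.NewTagCounterFiltered("example_{TAG}", "example counter",
		[]string{"TX", "AV"}, "UNK")
	tc.Add("TX", 1)    // allowed: counted under TX
	tc.Add("BOGUS", 1) // not allowed: folded into the UNK bucket
	var b strings.Builder
	tc.WriteMetric(&b, "")
	print(b.String())
}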
diff --git a/node/node.go b/node/node.go
index 7f0df81400..5e0ec29fbe 100644
--- a/node/node.go
+++ b/node/node.go
@@ -21,7 +21,6 @@ import (
"context"
"errors"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -298,11 +297,13 @@ func MakeFull(log logging.Logger, rootDir string, cfg config.Local, phonebookAdd
return nil, err
}
if catchpointCatchupState != ledger.CatchpointCatchupStateInactive {
- node.catchpointCatchupService, err = catchup.MakeResumedCatchpointCatchupService(context.Background(), node, node.log, node.net, node.ledger.Ledger, node.config)
+ accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log)
+ node.catchpointCatchupService, err = catchup.MakeResumedCatchpointCatchupService(context.Background(), node, node.log, node.net, accessor, node.config)
if err != nil {
log.Errorf("unable to create catchpoint catchup service: %v", err)
return nil, err
}
+ node.log.Infof("resuming catchpoint catchup from state %d", catchpointCatchupState)
}
node.tracer = messagetracer.NewTracer(log).Init(cfg)
@@ -900,7 +901,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc
func (node *AlgorandFullNode) loadParticipationKeys() error {
// Generate a list of all potential participation key files
genesisDir := filepath.Join(node.rootDir, node.genesisID)
- files, err := ioutil.ReadDir(genesisDir)
+ files, err := os.ReadDir(genesisDir)
if err != nil {
return fmt.Errorf("AlgorandFullNode.loadPartitipationKeys: could not read directory %v: %v", genesisDir, err)
}
@@ -1118,7 +1119,8 @@ func (node *AlgorandFullNode) StartCatchup(catchpoint string) error {
return MakeCatchpointUnableToStartError(stats.CatchpointLabel, catchpoint)
}
var err error
- node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, node.ledger.Ledger, node.config)
+ accessor := ledger.MakeCatchpointCatchupAccessor(node.ledger.Ledger, node.log)
+ node.catchpointCatchupService, err = catchup.MakeNewCatchpointCatchupService(catchpoint, node, node.log, node.net, accessor, node.config)
if err != nil {
node.log.Warnf("unable to create catchpoint catchup service : %v", err)
return err
@@ -1145,12 +1147,12 @@ func (node *AlgorandFullNode) AbortCatchup(catchpoint string) error {
}
// SetCatchpointCatchupMode change the node's operational mode from catchpoint catchup mode and back, it returns a
-// channel which contains the updated node context. This function need to work asyncronisly so that the caller could
-// detect and handle the usecase where the node is being shut down while we're switching to/from catchup mode without
+// channel which contains the updated node context. This function needs to work asynchronously so that the caller can
+// detect and handle the use case where the node is being shut down while we're switching to/from catchup mode without
// deadlocking on the shared node mutex.
func (node *AlgorandFullNode) SetCatchpointCatchupMode(catchpointCatchupMode bool) (outCtxCh <-chan context.Context) {
// create a non-buffered channel to return the newly created context. The fact that it's non-buffered here
- // is imporant, as it allows us to syncronize the "receiving" of the new context before canceling of the previous
+ // is important, as it allows us to synchronize the "receiving" of the new context before canceling the previous
// one.
ctxCh := make(chan context.Context)
outCtxCh = ctxCh
diff --git a/node/node_test.go b/node/node_test.go
index f440810f6b..dcf2bb6a20 100644
--- a/node/node_test.go
+++ b/node/node_test.go
@@ -502,51 +502,6 @@ func TestMismatchingGenesisDirectoryPermissions(t *testing.T) {
require.NoError(t, os.RemoveAll(testDirectroy))
}
-func TestAsyncRecord(t *testing.T) {
- partitiontest.PartitionTest(t)
-
- testDirectroy := t.TempDir()
-
- genesis := bookkeeping.Genesis{
- SchemaID: "go-test-node-record-async",
- Proto: protocol.ConsensusCurrentVersion,
- Network: config.Devtestnet,
- FeeSink: sinkAddr.String(),
- RewardsPool: poolAddr.String(),
- }
-
- cfg := config.GetDefaultLocal()
- cfg.DisableNetworking = true
- node, err := MakeFull(logging.TestingLog(t), testDirectroy, config.GetDefaultLocal(), []string{}, genesis)
- require.NoError(t, err)
- node.Start()
- defer node.Stop()
-
- var addr basics.Address
- addr[0] = 1
-
- p := account.Participation{
- Parent: addr,
- FirstValid: 0,
- LastValid: 1000000,
- Voting: &crypto.OneTimeSignatureSecrets{},
- VRF: &crypto.VRFSecrets{},
- }
- id, err := node.accountManager.Registry().Insert(p)
- require.NoError(t, err)
- err = node.accountManager.Registry().Register(id, 0)
- require.NoError(t, err)
-
- node.Record(addr, 10000, account.Vote)
- node.Record(addr, 20000, account.BlockProposal)
-
- time.Sleep(5000 * time.Millisecond)
- records := node.accountManager.Registry().GetAll()
- require.Len(t, records, 1)
- require.Equal(t, 10000, int(records[0].LastVote))
- require.Equal(t, 20000, int(records[0].LastBlockProposal))
-}
-
// TestOfflineOnlineClosedBitStatus a test that validates that the correct bits are being set
func TestOfflineOnlineClosedBitStatus(t *testing.T) {
partitiontest.PartitionTest(t)
diff --git a/nodecontrol/algodControl.go b/nodecontrol/algodControl.go
index 872fa05a45..e614cf63fb 100644
--- a/nodecontrol/algodControl.go
+++ b/nodecontrol/algodControl.go
@@ -18,7 +18,6 @@ package nodecontrol
import (
"fmt"
- "io/ioutil"
"net/url"
"os"
"os/exec"
@@ -368,7 +367,7 @@ func (nc NodeController) GetGenesis() (bookkeeping.Genesis, error) {
var genesis bookkeeping.Genesis
genesisFile := filepath.Join(nc.GetDataDir(), config.GenesisJSONFile)
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return genesis, err
}
@@ -417,7 +416,7 @@ func (nc NodeController) setAlgodCmdLogFiles(cmd *exec.Cmd) (files []*os.File) {
func (nc NodeController) readGenesisJSON(genesisFile string) (genesisLedger bookkeeping.Genesis, err error) {
// Load genesis
- genesisText, err := ioutil.ReadFile(genesisFile)
+ genesisText, err := os.ReadFile(genesisFile)
if err != nil {
return
}
diff --git a/protocol/codec.go b/protocol/codec.go
index 1153b7c612..e74b4b3e8e 100644
--- a/protocol/codec.go
+++ b/protocol/codec.go
@@ -246,6 +246,35 @@ func NewDecoderBytes(b []byte) Decoder {
return codec.NewDecoderBytes(b, CodecHandle)
}
+// NewMsgpDecoderBytes returns a decoder object that reads msgp-serialized
+// objects from the byte slice [b].
+func NewMsgpDecoderBytes(b []byte) *MsgpDecoderBytes {
+ return &MsgpDecoderBytes{b: b, pos: 0}
+}
+
+// MsgpDecoderBytes is a []byte decoder into msgp-encoded objects
+type MsgpDecoderBytes struct {
+ b []byte
+ pos int
+}
+
+// Decode deserializes the next msgp-encoded object from the byte stream into objptr
+func (d *MsgpDecoderBytes) Decode(objptr msgp.Unmarshaler) error {
+ if !objptr.CanUnmarshalMsg(objptr) {
+ return fmt.Errorf("object %T cannot be msgp-unmarshalled", objptr)
+ }
+ if d.pos >= len(d.b) {
+ return io.EOF
+ }
+
+ rem, err := objptr.UnmarshalMsg(d.b[d.pos:])
+ if err != nil {
+ return err
+ }
+ d.pos = (len(d.b) - len(rem))
+ return nil
+}
+
// encodingPool holds temporary byte slice buffers used for encoding messages.
var encodingPool = sync.Pool{
New: func() interface{} {
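NewMsgpDecoderBytes tracks its position in the buffer, so repeated Decode calls walk a stream of back-to-back msgp objects and report io.EOF at the end. A minimal consumption sketch grounded in that contract (the helper name is mine):

package main

import (
	"errors"
	"io"

	"github.com/algorand/go-algorand/protocol"
)

// decodeAllTags drains a buffer of concatenated msgp-encoded Tags.
func decodeAllTags(buf []byte) ([]protocol.Tag, error) {
	dec := protocol.NewMsgpDecoderBytes(buf)
	var tags []protocol.Tag
	for {
		var tag protocol.Tag
		if err := dec.Decode(&tag); err != nil {
			if errors.Is(err, io.EOF) {
				return tags, nil // clean end of stream
			}
			return nil, err
		}
		tags = append(tags, tag)
	}
}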
diff --git a/protocol/codec_test.go b/protocol/codec_test.go
index 79814dadc6..e623f9024b 100644
--- a/protocol/codec_test.go
+++ b/protocol/codec_test.go
@@ -17,6 +17,9 @@
package protocol
import (
+ "fmt"
+ "io"
+ "math/rand"
"reflect"
"testing"
@@ -199,3 +202,68 @@ func TestEncodeJSON(t *testing.T) {
require.True(t, reflect.DeepEqual(v, nsv))
require.True(t, reflect.DeepEqual(v, sv))
}
+
+func TestMsgpDecode(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ var tag Tag = "test"
+ dec := NewMsgpDecoderBytes([]byte{1, 2, 3})
+ err := dec.Decode(&tag)
+ require.Error(t, err)
+
+ data := EncodeMsgp(tag)
+ dec = NewMsgpDecoderBytes(data)
+ var tag2 Tag
+ err = dec.Decode(&tag2)
+ require.Equal(t, tag, tag2)
+ require.NoError(t, err)
+
+ limit := rand.Intn(30)
+ tags := make([]Tag, limit)
+ buf := make([]byte, 0, limit*10)
+ for i := 0; i < limit; i++ {
+ tags[i] = Tag(fmt.Sprintf("tag_%d", i))
+ buf = append(buf, EncodeMsgp(tags[i])...)
+ }
+
+ dec = NewMsgpDecoderBytes(buf)
+ for i := 0; i < limit; i++ {
+ err = dec.Decode(&tag2)
+ require.NoError(t, err)
+ require.Equal(t, tags[i], tag2)
+ }
+ err = dec.Decode(&tag2)
+ require.Error(t, err)
+ require.ErrorIs(t, err, io.EOF)
+}
+
+func TestRandomizeObjectWithPtrField(t *testing.T) {
+ partitiontest.PartitionTest(t)
+
+ type testObjA struct {
+ U64 uint64
+ }
+ type testObjB struct {
+ U16 uint16
+ ObjA *testObjA
+ }
+
+ // run a few and fail if all ints are zero
+ sawNonZeroU16 := false
+ sawNonZeroU64 := false
+ for i := 0; i < 10; i++ {
+ obj, err := RandomizeObject(&testObjB{})
+ require.NoError(t, err)
+ objB, ok := obj.(*testObjB)
+ require.True(t, ok)
+ require.NotNil(t, objB.ObjA)
+ if objB.U16 != 0 {
+ sawNonZeroU16 = true
+ }
+ if objB.ObjA.U64 != 0 {
+ sawNonZeroU64 = true
+ }
+ }
+ require.True(t, sawNonZeroU16, "RandomizeObject made all zeroes for testObjB.U16")
+ require.True(t, sawNonZeroU64, "RandomizeObject made all zeroes for testObjA.U64")
+}
diff --git a/protocol/codec_tester.go b/protocol/codec_tester.go
index 694c2c492c..f402700391 100644
--- a/protocol/codec_tester.go
+++ b/protocol/codec_tester.go
@@ -17,9 +17,7 @@
package protocol
import (
- "errors"
"fmt"
- "io/ioutil"
"math/rand"
"os"
"path"
@@ -147,11 +145,11 @@ func checkMsgpAllocBoundDirective(dataType reflect.Type) bool {
return nil
})
for _, packageFile := range packageFiles {
- fileBytes, err := ioutil.ReadFile(packageFile)
+ fileBytes, err := os.ReadFile(packageFile)
if err != nil {
continue
}
- if strings.Index(string(fileBytes), fmt.Sprintf("msgp:allocbound %s", dataType.Name())) != -1 {
+ if strings.Contains(string(fileBytes), fmt.Sprintf("msgp:allocbound %s", dataType.Name())) {
// message pack alloc bound definition was found.
return true
}
@@ -230,7 +228,13 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
switch v.Kind() {
case reflect.Uint, reflect.Uintptr, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
- v.SetUint(rand.Uint64())
+ if strings.HasSuffix(datapath, "/HashType") &&
+ strings.HasSuffix(v.Type().PkgPath(), "go-algorand/crypto") && v.Type().Name() == "HashType" {
+ // generate value that will avoid protocol.ErrInvalidObject from HashType.Validate()
+ v.SetUint(rand.Uint64() % 3) // 3 is crypto.MaxHashType
+ } else {
+ v.SetUint(rand.Uint64())
+ }
*remainingChanges--
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
v.SetInt(int64(rand.Uint64()))
@@ -243,6 +247,12 @@ func randomizeValue(v reflect.Value, datapath string, tag string, remainingChang
}
v.SetString(string(buf))
*remainingChanges--
+ case reflect.Ptr:
+ v.Set(reflect.New(v.Type().Elem()))
+ err := randomizeValue(reflect.Indirect(v), datapath, tag, remainingChanges, seenTypes)
+ if err != nil {
+ return err
+ }
case reflect.Struct:
st := v.Type()
if !seenTypes[st] {
@@ -352,7 +362,11 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
}
if debugCodecTester {
- ioutil.WriteFile("/tmp/v0", []byte(fmt.Sprintf("%#v", v0)), 0666)
+ err = os.WriteFile("/tmp/v0", []byte(fmt.Sprintf("%#v", v0)), 0666)
+ if err != nil {
+ return err
+ }
}
e1 := EncodeMsgp(v0.(msgp.Marshaler))
@@ -360,8 +374,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
// for debug, write out the encodings to a file
if debugCodecTester {
- ioutil.WriteFile("/tmp/e1", e1, 0666)
- ioutil.WriteFile("/tmp/e2", e2, 0666)
+ err = os.WriteFile("/tmp/e1", e1, 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/e2", e2, 0666)
+ if err != nil {
+ return err
+ }
}
if !reflect.DeepEqual(e1, e2) {
@@ -382,8 +402,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
}
if debugCodecTester {
- ioutil.WriteFile("/tmp/v1", []byte(fmt.Sprintf("%#v", v1)), 0666)
- ioutil.WriteFile("/tmp/v2", []byte(fmt.Sprintf("%#v", v2)), 0666)
+ err = os.WriteFile("/tmp/v1", []byte(fmt.Sprintf("%#v", v1)), 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/v2", []byte(fmt.Sprintf("%#v", v2)), 0666)
+ if err != nil {
+ return err
+ }
}
// At this point, it might be that v differs from v1 and v2,
@@ -402,8 +428,14 @@ func EncodingTest(template msgpMarshalUnmarshal) error {
ee2 := EncodeReflect(v1)
if debugCodecTester {
- ioutil.WriteFile("/tmp/ee1", ee1, 0666)
- ioutil.WriteFile("/tmp/ee2", ee2, 0666)
+ err = os.WriteFile("/tmp/ee1", ee1, 0666)
+ if err != nil {
+ return err
+ }
+ err = os.WriteFile("/tmp/ee2", ee2, 0666)
+ if err != nil {
+ return err
+ }
}
if !reflect.DeepEqual(e1, ee1) {
@@ -427,15 +459,7 @@ func RunEncodingTest(t *testing.T, template msgpMarshalUnmarshal) {
t.Skip()
return
}
- if err == nil {
- continue
- }
- // some objects might appen to the original error additional info.
- // we ensure that invalidObject error is not failing the test.
- if errors.As(err, &ErrInvalidObject) {
- continue
- }
require.NoError(t, err)
}
}
diff --git a/protocol/consensus.go b/protocol/consensus.go
index a996525ad5..cd03519fb4 100644
--- a/protocol/consensus.go
+++ b/protocol/consensus.go
@@ -198,6 +198,20 @@ const ConsensusFuture = ConsensusVersion(
"future",
)
+// ConsensusVAlpha1 is the first consensus protocol for AlphaNet, which is the same as
+// v32, but with a 2-second filter timeout and 5M block size.
+const ConsensusVAlpha1 = ConsensusVersion("alpha1")
+
+// ConsensusVAlpha2 is the second consensus protocol for AlphaNet, which increases the
+// filter timeout to 3.5 seconds and uses 5MiB blocks.
+const ConsensusVAlpha2 = ConsensusVersion("alpha2")
+
+// ConsensusVAlpha3 uses the same parameters as ConsensusV33.
+const ConsensusVAlpha3 = ConsensusVersion("alpha3")
+
+// ConsensusVAlpha4 uses the same parameters as ConsensusV34.
+const ConsensusVAlpha4 = ConsensusVersion("alpha4")
+
// !!! ********************* !!!
// !!! *** Please update ConsensusCurrentVersion when adding new protocol versions *** !!!
// !!! ********************* !!!
diff --git a/protocol/transcode/core_test.go b/protocol/transcode/core_test.go
index 132c4270a2..e9cfc42f87 100644
--- a/protocol/transcode/core_test.go
+++ b/protocol/transcode/core_test.go
@@ -20,7 +20,6 @@ import (
"encoding/base32"
"fmt"
"io"
- "io/ioutil"
"testing"
"github.com/stretchr/testify/require"
@@ -56,7 +55,7 @@ func testIdempotentRoundtrip(t *testing.T, mpdata []byte) {
}
p1in.Close()
}()
- res, err := ioutil.ReadAll(p3out)
+ res, err := io.ReadAll(p3out)
require.NoError(t, err)
require.Equal(t, mpdata, res)
diff --git a/rpcs/blockService_test.go b/rpcs/blockService_test.go
index f05c0f6efb..2adb2f1b29 100644
--- a/rpcs/blockService_test.go
+++ b/rpcs/blockService_test.go
@@ -19,7 +19,7 @@ package rpcs
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"strings"
"testing"
@@ -173,7 +173,7 @@ func TestRedirectFallbackArchiver(t *testing.T) {
require.NoError(t, err)
require.Equal(t, http.StatusOK, response.StatusCode)
- bodyData, err := ioutil.ReadAll(response.Body)
+ bodyData, err := io.ReadAll(response.Body)
require.NoError(t, err)
require.NotEqual(t, 0, len(bodyData))
}
diff --git a/rpcs/txSyncer_test.go b/rpcs/txSyncer_test.go
index d4b0d2fbc5..0d09bb0878 100644
--- a/rpcs/txSyncer_test.go
+++ b/rpcs/txSyncer_test.go
@@ -19,6 +19,7 @@ package rpcs
import (
"context"
"errors"
+ "math/rand"
"net/http"
"net/rpc"
"strings"
@@ -44,9 +45,31 @@ type mockPendingTxAggregate struct {
txns []transactions.SignedTxn
}
+var testSource rand.Source
+var testRand *rand.Rand
+
+func init() {
+ testSource = rand.NewSource(12345678)
+ testRand = rand.New(testSource)
+}
+
+func testRandBytes(d []byte) {
+ // We don't need cryptographically strong random bytes for a
+ // unit test, but we _do_ need deterministic 'random' bytes so
+ // that _sometimes_ a bloom filter doesn't fail on the data
+ // (e.g. TestSync() below).
+ n, err := testRand.Read(d)
+ if n != len(d) {
+ panic("short rand read")
+ }
+ if err != nil {
+ panic(err)
+ }
+}
+
func makeMockPendingTxAggregate(txCount int) mockPendingTxAggregate {
var secret [32]byte
- crypto.RandBytes(secret[:])
+ testRandBytes(secret[:])
sk := crypto.GenerateSignatureSecrets(crypto.Seed(secret))
mock := mockPendingTxAggregate{
txns: make([]transactions.SignedTxn, txCount),
@@ -54,7 +77,7 @@ func makeMockPendingTxAggregate(txCount int) mockPendingTxAggregate {
for i := 0; i < txCount; i++ {
var note [16]byte
- crypto.RandBytes(note[:])
+ testRandBytes(note[:])
tx := transactions.Transaction{
Type: protocol.PaymentTx,
Header: transactions.Header{
diff --git a/scripts/build_deb.sh b/scripts/build_deb.sh
index 0cd7c154ae..3c5b1e2198 100755
--- a/scripts/build_deb.sh
+++ b/scripts/build_deb.sh
@@ -74,7 +74,7 @@ for data in "${data_files[@]}"; do
done
if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "${PKG_ROOT}/var/lib/algorand/genesis/${dir}"
cp "${REPO_DIR}/installer/genesis/${dir}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis/${dir}/genesis.json"
@@ -82,7 +82,7 @@ if [ ! -z "${RELEASE_GENESIS_PROCESS}" ]; then
done
# Copy the appropriate network genesis.json for our default (in root ./genesis folder)
cp "${PKG_ROOT}/var/lib/algorand/genesis/${DEFAULT_RELEASE_NETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand"
-elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta" ]]; then
+elif [[ "${CHANNEL}" == "dev" || "${CHANNEL}" == "stable" || "${CHANNEL}" == "nightly" || "${CHANNEL}" == "beta"|| "${CHANNEL}" == "alpha" ]]; then
cp "${REPO_DIR}/installer/genesis/${DEFAULTNETWORK}/genesis.json" "${PKG_ROOT}/var/lib/algorand/genesis.json"
#${GOPATH}/bin/buildtools genesis ensure -n ${DEFAULTNETWORK} --source ${REPO_DIR}/gen/${DEFAULTNETWORK}/genesis.json --target ${PKG_ROOT}/var/lib/algorand/genesis.json --releasedir ${REPO_DIR}/installer/genesis
else
diff --git a/scripts/build_package.sh b/scripts/build_package.sh
index ccbf0d27e0..d8f169201d 100755
--- a/scripts/build_package.sh
+++ b/scripts/build_package.sh
@@ -88,7 +88,7 @@ done
mkdir ${PKG_ROOT}/genesis
-genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p ${PKG_ROOT}/genesis/${dir}
if [ -f "${REPO_DIR}/gen/${dir}/genesis.json" ]; then
diff --git a/scripts/buildtools/check_tests.py b/scripts/buildtools/check_tests.py
index d34bb70389..8bf8a66d48 100755
--- a/scripts/buildtools/check_tests.py
+++ b/scripts/buildtools/check_tests.py
@@ -6,8 +6,9 @@
# Arguments parsing / help menu
parser = argparse.ArgumentParser(description='Check test results for intentionally and unintentionally skipped tests, as well as tests that ran multiple times.')
-parser.add_argument('tests_results_filepath', metavar='RESULTS_FILE',
- help='json format test results file path (e.g. /tmp/results/testresults.json)')
+parser.add_argument('--tests-results-filepath', metavar='RESULTS_FILE',
+ help='json format test results file path (e.g. /tmp/results/testresults.json)', required=True)
+parser.add_argument('--ignored-tests', nargs='*', help='Exact test names to ignore during verification')
args = parser.parse_args()
# Go through the given file one json object at a time, and record into a dict
@@ -17,7 +18,10 @@
testDict = json.loads(jsonObj)
if 'Test' not in testDict:
continue
-
+
+ if args.ignored_tests and testDict['Test'] in args.ignored_tests:
+ continue
+
fullTestName = testDict['Package'] + ' ' + testDict['Test']
if fullTestName not in AllTestResults:
AllTestResults[fullTestName] = {}
diff --git a/scripts/buildtools/install_buildtools.sh b/scripts/buildtools/install_buildtools.sh
index 2e0db63537..7a004851a1 100755
--- a/scripts/buildtools/install_buildtools.sh
+++ b/scripts/buildtools/install_buildtools.sh
@@ -87,9 +87,9 @@ if [[ "${BUILDTOOLS_INSTALL}" != "ALL" ]]; then
exit 0
fi
-install_go_module golang.org/x/lint golang.org/x/lint/golint
install_go_module golang.org/x/tools golang.org/x/tools/cmd/stringer
install_go_module github.com/go-swagger/go-swagger github.com/go-swagger/go-swagger/cmd/swagger
install_go_module github.com/algorand/msgp
install_go_module gotest.tools/gotestsum
install_go_module github.com/algorand/oapi-codegen github.com/algorand/oapi-codegen/cmd/oapi-codegen
+install_go_module github.com/golangci/golangci-lint/cmd/golangci-lint
diff --git a/scripts/buildtools/versions b/scripts/buildtools/versions
index c5a247b036..04960db22f 100644
--- a/scripts/buildtools/versions
+++ b/scripts/buildtools/versions
@@ -4,3 +4,4 @@ github.com/algorand/msgp v1.1.52
github.com/algorand/oapi-codegen v1.3.7
github.com/go-swagger/go-swagger v0.25.0
gotest.tools/gotestsum v1.6.4
+github.com/golangci/golangci-lint/cmd/golangci-lint v1.47.3
diff --git a/scripts/check_deps.sh b/scripts/check_deps.sh
index 4752108a29..a42405733d 100755
--- a/scripts/check_deps.sh
+++ b/scripts/check_deps.sh
@@ -35,9 +35,9 @@ missing_dep() {
}
GO_DEPS=(
- "$GO_BIN/golint"
"$GO_BIN/stringer"
"$GO_BIN/msgp"
+ "$GO_BIN/golangci-lint"
)
check_deps() {
diff --git a/scripts/compute_branch_channel.sh b/scripts/compute_branch_channel.sh
index 90cde3b553..6e8f77eac0 100755
--- a/scripts/compute_branch_channel.sh
+++ b/scripts/compute_branch_channel.sh
@@ -10,6 +10,8 @@ elif [ "$1" = "rel/stable" ]; then
echo "stable"
elif [ "$1" = "rel/beta" ]; then
echo "beta"
+elif [ "$1" = "feature/alphanet" ]; then
+ echo "alpha"
else
echo "dev"
fi
diff --git a/scripts/compute_branch_network.sh b/scripts/compute_branch_network.sh
index 9967463b16..a6a0d8f240 100755
--- a/scripts/compute_branch_network.sh
+++ b/scripts/compute_branch_network.sh
@@ -15,6 +15,9 @@ if [ "${BRANCH}" = "rel/stable" ]; then
elif [ "${BRANCH}" = "rel/beta" ]; then
echo "betanet"
exit 0
+elif [ "${BRANCH}" = "feature/alphanet" ]; then
+ echo "alphanet"
+ exit 0
fi
#get parent of current branch
@@ -26,6 +29,8 @@ if [ "${BRANCHPARENT}" = "rel/stable" ]; then
echo "testnet"
elif [ "${BRANCHPARENT}" = "rel/beta" ]; then
echo "betanet"
+elif [ "${BRANCHPARENT}" = "feature/alphanet" ]; then
+ echo "alphanet"
else
echo "devnet"
fi
diff --git a/scripts/compute_package_name.sh b/scripts/compute_package_name.sh
index 7e53a1351f..0a81ffb2f7 100755
--- a/scripts/compute_package_name.sh
+++ b/scripts/compute_package_name.sh
@@ -10,14 +10,12 @@
CHANNEL=${1:-stable}
NAME=${2:-algorand}
-if [ ! -z ${PACKAGE_NAME_EXTENSION} ]; then
+if [ -n "${PACKAGE_NAME_EXTENSION}" ]; then
NAME="${NAME}-${PACKAGE_NAME_EXTENSION}"
fi
-if [ "$CHANNEL" = beta ]; then
- echo "$NAME-beta"
-elif [ "$CHANNEL" = nightly ]; then
- echo "$NAME-nightly"
-else
+if [ "$CHANNEL" = stable ]; then
echo "$NAME"
+else
+ echo "$NAME-$CHANNEL"
fi
diff --git a/scripts/get_golang_version.sh b/scripts/get_golang_version.sh
index 390847ed4d..4e3525a548 100755
--- a/scripts/get_golang_version.sh
+++ b/scripts/get_golang_version.sh
@@ -11,7 +11,7 @@
# Our build task-runner `mule` will refer to this script and will automatically
# build a new image whenever the version number has been changed.
-BUILD=1.17.9
+BUILD=1.17.13
MIN=1.17
GO_MOD_SUPPORT=1.17
diff --git a/scripts/release/build/deb/build_deb.sh b/scripts/release/build/deb/build_deb.sh
index aeaa4eb307..275c37cd2d 100755
--- a/scripts/release/build/deb/build_deb.sh
+++ b/scripts/release/build/deb/build_deb.sh
@@ -57,7 +57,7 @@ if [[ ! "$PKG_NAME" =~ devtools ]]; then
cp "./installer/$data" "$PKG_ROOT/var/lib/algorand"
done
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "$PKG_ROOT/var/lib/algorand/genesis/$dir"
cp "./installer/genesis/$dir/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis/$dir/genesis.json"
diff --git a/scripts/release/mule/common/get_channel.sh b/scripts/release/mule/common/get_channel.sh
index b59937a1a4..d5b82d6409 100755
--- a/scripts/release/mule/common/get_channel.sh
+++ b/scripts/release/mule/common/get_channel.sh
@@ -2,7 +2,10 @@
NETWORK="$1"
-if [ "$NETWORK" = betanet ]
+if [ "$NETWORK" = alphanet ]
+then
+ echo alpha
+elif [ "$NETWORK" = betanet ]
then
echo beta
elif [ "$NETWORK" = mainnet ] || [ "$NETWORK" = testnet ]
diff --git a/scripts/release/mule/deploy/docker/docker.sh b/scripts/release/mule/deploy/docker/docker.sh
index c4125f041c..ee0c55fe0c 100755
--- a/scripts/release/mule/deploy/docker/docker.sh
+++ b/scripts/release/mule/deploy/docker/docker.sh
@@ -13,9 +13,9 @@ if [ -z "$NETWORK" ] || [ -z "$VERSION" ]; then
exit 1
fi
-if [[ ! "$NETWORK" =~ ^mainnet$|^testnet$|^betanet$ ]]
+if [[ ! "$NETWORK" =~ ^mainnet$|^testnet$|^betanet$|^alphanet$ ]]
then
- echo "[$0] Network values must be either \`mainnet\`, \`testnet\` or \`betanet\`."
+ echo "[$0] Network values must be either \`mainnet\`, \`testnet\`, \`betanet\`, or \`alphanet\`."
exit 1
fi
@@ -28,9 +28,9 @@ then
# Build and push testnet.
./build_releases.sh --tagname "$VERSION" --network testnet --cached
-elif [ "$NETWORK" = betanet ]
+elif [ "$NETWORK" = betanet ] || [ "$NETWORK" = alphanet ]
then
- ./build_releases.sh --tagname "$VERSION" --network betanet
+ ./build_releases.sh --tagname "$VERSION" --network "$NETWORK"
fi
popd
diff --git a/scripts/release/mule/package/deb/package.sh b/scripts/release/mule/package/deb/package.sh
index 6cd9f653ea..8a8612a3af 100755
--- a/scripts/release/mule/package/deb/package.sh
+++ b/scripts/release/mule/package/deb/package.sh
@@ -63,7 +63,7 @@ find tmp/node_pkgs -name "*${CHANNEL}*linux*${VERSION}*.tar.gz" | cut -d '/' -f3
cp "installer/$data" "$PKG_ROOT/var/lib/algorand"
done
- genesis_dirs=("devnet" "testnet" "mainnet" "betanet")
+ genesis_dirs=("devnet" "testnet" "mainnet" "betanet" "alphanet")
for dir in "${genesis_dirs[@]}"; do
mkdir -p "$PKG_ROOT/var/lib/algorand/genesis/$dir"
cp "./installer/genesis/$dir/genesis.json" "$PKG_ROOT/var/lib/algorand/genesis/$dir/genesis.json"
diff --git a/scripts/release/mule/test/test.sh b/scripts/release/mule/test/test.sh
index aed2a51107..27efe724ab 100755
--- a/scripts/release/mule/test/test.sh
+++ b/scripts/release/mule/test/test.sh
@@ -30,8 +30,8 @@ export OS_TYPE
export SHA
-ALGORAND_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-beta || echo algorand)
-DEVTOOLS_PACKAGE_NAME=$([ "$CHANNEL" = beta ] && echo algorand-devtools-beta || echo algorand-devtools)
+ALGORAND_PACKAGE_NAME=$( ( [ "$CHANNEL" = beta ] && echo algorand-beta ) || ( [ "$CHANNEL" = alpha ] && echo algorand-alpha ) || ( echo algorand ) )
+DEVTOOLS_PACKAGE_NAME=$( ( [ "$CHANNEL" = beta ] && echo algorand-devtools-beta ) || ( [ "$CHANNEL" = alpha ] && echo algorand-devtools-alpha ) || ( echo algorand-devtools ) )
export ALGORAND_PACKAGE_NAME
export DEVTOOLS_PACKAGE_NAME
@@ -61,7 +61,7 @@ then
# so although it appears as though I just lied to you, I did not :)
#
# rpm
- if [ "$CHANNEL" = "beta" ]
+ if [ "$CHANNEL" = "beta" ] || [ "$CHANNEL" = "alpha" ]
then
PACKAGE_NAME_SUFFIX="$CHANNEL-$VERSION-1.$ARCH_BIT"
else
diff --git a/scripts/travis/codegen_verification.sh b/scripts/travis/codegen_verification.sh
index ed0f5825cd..8ba594d7b1 100755
--- a/scripts/travis/codegen_verification.sh
+++ b/scripts/travis/codegen_verification.sh
@@ -27,43 +27,6 @@ eval "$(~/gimme "${GOLANG_VERSION}")"
make gen SHORT_PART_PERIOD=1
-function runGoFmt() {
- unformatted=$(gofmt -l .)
- [ -z "$unformatted" ] && return 0
-
- # Some files are not gofmt'd. Print message and fail.
-
- echo >&2 "Go files must be formatted with gofmt. Please run:"
- for fn in $unformatted; do
- echo >&2 " gofmt -w $PWD/$fn"
- done
-
- return 1
-}
-
-function runGoLint() {
- warningCount=$("$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) | wc -l | tr -d ' ')
- if [ "${warningCount}" = "0" ]; then
- return 0
- fi
-
- echo >&2 "golint must be clean. Please run the following to list issues(${warningCount}):"
- echo >&2 " make lint"
-
- # run the linter again to output the actual issues
- "$GOPATH"/bin/golint $(go list ./... | grep -v /vendor/ | grep -v /test/e2e-go/) >&2
- return 1
-}
-
-echo "Running go vet..."
-make vet
-
-echo "Running gofmt..."
-runGoFmt
-
-echo "Running golint..."
-runGoLint
-
echo "Running check_license..."
./scripts/check_license.sh
diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go
index fa8cfecdf0..52d1ab38ac 100644
--- a/shared/pingpong/accounts.go
+++ b/shared/pingpong/accounts.go
@@ -17,8 +17,9 @@
package pingpong
import (
+ "encoding/binary"
"fmt"
- "io/ioutil"
+ "log"
"math/rand"
"os"
"path/filepath"
@@ -34,30 +35,79 @@ import (
"github.com/algorand/go-algorand/data/transactions"
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
- "github.com/algorand/go-algorand/protocol"
"github.com/algorand/go-algorand/util/db"
)
-func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (accounts map[string]*pingPongAccount, cfg PpConfig, err error) {
- accounts = make(map[string]*pingPongAccount)
- cfg = initCfg
+func deterministicAccounts(initCfg PpConfig) <-chan *crypto.SignatureSecrets {
+ out := make(chan *crypto.SignatureSecrets)
+ if initCfg.GeneratedAccountSampleMethod == "" || initCfg.GeneratedAccountSampleMethod == "random" {
+ go randomDeterministicAccounts(initCfg, out)
+ } else if initCfg.GeneratedAccountSampleMethod == "sequential" {
+ go sequentialDeterministicAccounts(initCfg, out)
+ }
+ return out
+}
+func randomDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
+ numAccounts := initCfg.NumPartAccounts
+ totalAccounts := initCfg.GeneratedAccountsCount
+ if totalAccounts < numAccounts*4 {
+ // simpler rand strategy for smaller totalAccounts
+ order := rand.Perm(int(totalAccounts))[:numAccounts]
+ for _, acct := range order {
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ }
+ } else {
+ // randomly select numAccounts from generatedAccountsCount
+ // better for generatedAccountsCount much bigger than numAccounts
+ selected := make(map[uint32]bool, numAccounts)
+ for uint32(len(selected)) < numAccounts {
+ acct := uint32(rand.Int31n(int32(totalAccounts)))
+ if selected[acct] {
+ continue // already picked this account
+ }
+ // generate deterministic secret key from integer ID
+ // same uint64 seed scheme as in netdeploy/remote/deployedNetwork.go
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ selected[acct] = true
+ }
+ }
+ close(out)
+}
+
+func sequentialDeterministicAccounts(initCfg PpConfig, out chan *crypto.SignatureSecrets) {
+ for i := uint32(0); i < initCfg.NumPartAccounts; i++ {
+ acct := uint64(i) + uint64(initCfg.GeneratedAccountsOffset)
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], uint64(acct))
+ out <- crypto.GenerateSignatureSecrets(seed)
+ }
+}
+
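Both samplers derive keys from nothing but an integer, so a later run (or an external tool) can re-derive the same accounts. A self-contained sketch that recovers the address for deterministic account id 42 using the exact seed scheme above:

package main

import (
	"encoding/binary"
	"fmt"

	"github.com/algorand/go-algorand/crypto"
	"github.com/algorand/go-algorand/data/basics"
)

func main() {
	// Little-endian uint64 in an otherwise zero seed, as in the samplers above.
	var seed crypto.Seed
	binary.LittleEndian.PutUint64(seed[:], 42)
	secrets := crypto.GenerateSignatureSecrets(seed)
	fmt.Println(basics.Address(secrets.SignatureVerifier).String())
}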
+// load accounts from ${ALGORAND_DATA}/${netname}-${version}/*.rootkey
+func fileAccounts(ac *libgoal.Client) (out <-chan *crypto.SignatureSecrets, err error) {
genID, err2 := ac.GenesisID()
if err2 != nil {
err = err2
return
}
genesisDir := filepath.Join(ac.DataDir(), genID)
- files, err2 := ioutil.ReadDir(genesisDir)
+ files, err2 := os.ReadDir(genesisDir)
if err2 != nil {
err = err2
return
}
- var srcAcctPresent bool
- var richestAccount string
- var richestBalance uint64
+ ch := make(chan *crypto.SignatureSecrets)
+ go enumerateFileAccounts(files, genesisDir, ch)
+ return ch, nil
+}
+func enumerateFileAccounts(files []os.DirEntry, genesisDir string, out chan<- *crypto.SignatureSecrets) {
for _, info := range files {
var handle db.Accessor
@@ -67,7 +117,7 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
}
// Fetch a handle to this database
- handle, err = db.MakeErasableAccessor(filepath.Join(genesisDir, info.Name()))
+ handle, err := db.MakeErasableAccessor(filepath.Join(genesisDir, info.Name()))
if err != nil {
// Couldn't open it, skip it
continue
@@ -81,339 +131,304 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc
continue
}
- publicKey := root.Secrets().SignatureVerifier
- accountAddress := basics.Address(publicKey)
+ out <- root.Secrets()
+ }
+ close(out)
+}
- if accountAddress.String() == cfg.SrcAccount {
- srcAcctPresent = true
- }
+func (pps *WorkerState) ensureAccounts(ac *libgoal.Client) (err error) {
+ if pps.accounts == nil {
+ pps.accounts = make(map[string]*pingPongAccount)
+ }
- amt, err := ac.GetBalance(accountAddress.String())
- if err != nil {
- return nil, PpConfig{}, err
- }
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp)
+ }
+ if pps.cinfo.AssetParams == nil {
+ pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ }
+ if pps.cinfo.AppParams == nil {
+ pps.cinfo.AppParams = make(map[uint64]v1.AppParams, pps.cfg.NumApp)
+ }
- if !srcAcctPresent && amt > richestBalance {
- richestAccount = accountAddress.String()
- richestBalance = amt
- }
+ sources := make([]<-chan *crypto.SignatureSecrets, 0, 2)
+ // read file accounts for local big source money
+ var fileSource <-chan *crypto.SignatureSecrets
+ fileSource, err = fileAccounts(ac)
+ if err != nil {
+ return
+ }
+ sources = append(sources, fileSource)
+ if pps.cfg.DeterministicKeys {
+ // add deterministic key accounts for re-use across runs
+ detSource := deterministicAccounts(pps.cfg)
+ sources = append(sources, detSource)
+ }
- if !initCfg.Quiet {
- fmt.Printf("Found local account: %s -> %v\n", accountAddress.String(), amt)
- }
+ var srcAcctPresent bool
+ var richestAccount string
+ var richestBalance uint64
+
+ for _, source := range sources {
+ for secret := range source {
+ publicKey := secret.SignatureVerifier
+ accountAddress := basics.Address(publicKey)
+ addr := accountAddress.String()
+
+ if addr == pps.cfg.SrcAccount {
+ srcAcctPresent = true
+ }
+
+ // TODO: switch to v2 API
+ //ai, err := ac.AccountInformationV2(addr, false)
+ ai, err := ac.AccountInformation(addr)
+ if err != nil {
+ return err
+ }
+ amt := ai.Amount
+
+ if !srcAcctPresent && amt > richestBalance {
+ richestAccount = addr
+ richestBalance = amt
+ }
- accounts[accountAddress.String()] = &pingPongAccount{
- balance: amt,
- sk: root.Secrets(),
- pk: accountAddress,
+ ppa := &pingPongAccount{
+ balance: amt,
+ sk: secret,
+ pk: accountAddress,
+ }
+
+ pps.integrateAccountInfo(addr, ppa, ai)
+
+ if !pps.cfg.Quiet {
+ fmt.Printf("Found local account: %s\n", ppa.String())
+ }
+
+ pps.accounts[addr] = ppa
}
}
if !srcAcctPresent {
- if cfg.SrcAccount != "" {
- err = fmt.Errorf("specified Source Account '%s' not found", cfg.SrcAccount)
+ if pps.cfg.SrcAccount != "" {
+ err = fmt.Errorf("specified Source Account '%s' not found", pps.cfg.SrcAccount)
return
}
- if richestBalance >= cfg.MinAccountFunds {
- cfg.SrcAccount = richestAccount
+ if richestBalance >= pps.cfg.MinAccountFunds {
+ pps.cfg.SrcAccount = richestAccount
fmt.Printf("Identified richest account to use for Source Account: %s -> %v\n", richestAccount, richestBalance)
} else {
- err = fmt.Errorf("no accounts found with sufficient stake (> %d)", cfg.MinAccountFunds)
+ err = fmt.Errorf("no accounts found with sufficient stake (> %d)", pps.cfg.MinAccountFunds)
return
}
} else {
- fmt.Printf("Located Source Account: %s -> %v\n", cfg.SrcAccount, accounts[cfg.SrcAccount])
+ fmt.Printf("Located Source Account: %s -> %v\n", pps.cfg.SrcAccount, pps.accounts[pps.cfg.SrcAccount])
}
return
}
-// Prepare assets for asset transaction testing
-// Step 1) Create X assets for each of the participant accounts
-// Step 2) For each participant account, opt-in to assets of all other participant accounts
-// Step 3) Evenly distribute the assets across all participant accounts
-func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, client libgoal.Client) (resultAssetMaps map[uint64]v1.AssetParams, optIns map[uint64][]string, err error) {
- proto, err := getProto(client)
- if err != nil {
- return
+func (pps *WorkerState) integrateAccountInfo(addr string, ppa *pingPongAccount, ai v1.Account) {
+ ppa.balance = ai.Amount
+ // assets this account has created
+ for assetID, ap := range ai.AssetParams {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ pps.cinfo.AssetParams[assetID] = ap
}
+ // assets held
+ for assetID, holding := range ai.Assets {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if ppa.holdings == nil {
+ ppa.holdings = make(map[uint64]uint64)
+ }
+ ppa.holdings[assetID] = holding.Amount
+ }
+ // apps created by this account
+ for appID, ap := range ai.AppParams {
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ pps.cinfo.AppParams[appID] = ap
+ }
+ // apps opted into
+ for appID := range ai.AppLocalStates {
+ pps.cinfo.OptIns[appID] = uniqueAppend(pps.cinfo.OptIns[appID], addr)
+ }
+}
- resultAssetMaps = make(map[uint64]v1.AssetParams)
+type assetopti struct {
+ assetID uint64
+ params v1.AssetParams // TODO: switch to v2 API
+ optins []string // addr strings
+}
- // optIns contains own and explicitly opted-in assets
- optIns = make(map[uint64][]string)
- numCreatedAssetsByAddr := make(map[string]int, len(accounts))
+type assetSet []assetopti
- nextSendTime := time.Now()
+// Len is part of sort.Interface
+func (as *assetSet) Len() int {
+ return len(*as)
+}
- // 1) Create X assets for each of the participant accounts
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
- }
- addrAccount, addrErr := client.AccountInformation(addr)
- if addrErr != nil {
- fmt.Printf("Cannot lookup source account %v\n", addr)
- err = addrErr
- return
- }
+// Less is part of sort.Interface
+// This is a reversed sort: assets with more opt-ins come first
+func (as *assetSet) Less(a, b int) bool {
+ return len((*as)[a].optins) > len((*as)[b].optins)
+}
- toCreate := int(pps.cfg.NumAsset) - len(addrAccount.AssetParams)
- numCreatedAssetsByAddr[addr] = toCreate
+// Swap is part of sort.Interface
+func (as *assetSet) Swap(a, b int) {
+ t := (*as)[a]
+ (*as)[a] = (*as)[b]
+ (*as)[b] = t
+}
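
Note: the three methods above exist only so assetSet satisfies sort.Interface for the single sort.Sort call in prepareAssets. The same most-opted-in-first ordering could be written more compactly with sort.Slice; a sketch, not part of the patch:

    // assets is the []assetopti built in prepareAssets
    sort.Slice(assets, func(i, j int) bool {
        return len(assets[i].optins) > len(assets[j].optins)
    })
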
- fmt.Printf("Creating %v create asset transaction for account %v \n", toCreate, addr)
- fmt.Printf("cfg.NumAsset %v, addrAccount.AssetParams %v\n", pps.cfg.NumAsset, addrAccount.AssetParams)
+func (pps *WorkerState) prepareAssets(client *libgoal.Client) (err error) {
+ if pps.cinfo.AssetParams == nil {
+ pps.cinfo.AssetParams = make(map[uint64]v1.AssetParams)
+ }
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string)
+ }
- totalSupply := pps.cfg.MinAccountAsset * uint64(pps.cfg.NumPartAccounts) * 9 * uint64(pps.cfg.GroupSize) * uint64(pps.cfg.RefreshTime.Seconds()) / pps.cfg.TxnPerSec
+ // create new assets as needed
+ err = pps.makeNewAssets(client)
+ if err != nil {
+ return
+ }
- // create assets in participant account
- for i := 0; i < toCreate; i++ {
- var metaLen = 32
- meta := make([]byte, metaLen)
- crypto.RandBytes(meta[:])
+ // find the most-opted-in assets to work with
+ assets := make([]assetopti, len(pps.cinfo.AssetParams))
+ pos := 0
+ for assetID, params := range pps.cinfo.AssetParams {
+ assets[pos].assetID = assetID
+ assets[pos].params = params
+ assets[pos].optins = pps.cinfo.OptIns[assetID]
+ pos++
+ }
+ ta := assetSet(assets)
+ sort.Sort(&ta)
+ if len(assets) > int(pps.cfg.NumAsset) {
+ assets = assets[:pps.cfg.NumAsset]
+ nap := make(map[uint64]v1.AssetParams, pps.cfg.NumAsset)
+ for _, asset := range assets {
+ nap[asset.assetID] = asset.params
+ }
+ pps.cinfo.AssetParams = nap
+ }
+
+ // opt-in more accounts as needed
+ for assetID := range pps.cinfo.AssetParams {
+ for addr, acct := range pps.accounts {
+ _, has := acct.holdings[assetID]
+ if !has {
+ tx, sendErr := client.MakeUnsignedAssetSendTx(assetID, 0, addr, "", "")
+ if sendErr != nil {
+ fmt.Printf("Cannot initiate asset optin %v in account %v\n", assetID, addr)
+ err = sendErr
+ continue
+ }
- if totalSupply < pps.cfg.MinAccountAsset { // overflow
- fmt.Printf("Too many NumPartAccounts\n")
- return
- }
- assetName := fmt.Sprintf("pong%d", i)
- if !pps.cfg.Quiet {
- fmt.Printf("Creating asset %s\n", assetName)
- }
- tx, createErr := client.MakeUnsignedAssetCreateTx(totalSupply, false, addr, addr, addr, addr, "ping", assetName, "", meta, 0)
- if createErr != nil {
- fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
- err = createErr
- return
- }
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill asset creation txn\n")
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[addr], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
- return
- }
- }
- }
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill asset optin %v in account %v\n", assetID, addr)
+ continue
+ }
+ tx.Note = pps.makeNextUniqueNoteField()
- // wait until all the assets created
- allAssets := make(map[uint64]string, int(pps.cfg.NumAsset)*len(accounts))
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
- }
- var account v1.Account
- deadline := time.Now().Add(3 * time.Minute)
- for {
- account, err = client.AccountInformation(addr)
- if err != nil {
- fmt.Printf("Warning: cannot lookup source account after assets creation")
- time.Sleep(1 * time.Second)
- continue
- }
- if len(account.AssetParams) >= numCreatedAssetsByAddr[addr] {
- break
- }
- if time.Now().After(deadline) {
- err = fmt.Errorf("asset creation took too long")
- fmt.Printf("Error: %s\n", err.Error())
- return
+ pps.schedule(1)
+ _, err = signAndBroadcastTransaction(acct, tx, client)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
+ continue
+ }
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
}
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- }
- assetParams := account.AssetParams
- if !pps.cfg.Quiet {
- fmt.Printf("Configured %d assets %+v\n", len(assetParams), assetParams)
- }
- // add own asset to opt-ins since asset creators are auto-opted in
- for k := range account.AssetParams {
- optIns[k] = append(optIns[k], addr)
- allAssets[k] = addr
}
}
- // optInsByAddr tracks only explicitly opted-in assetsA
- optInsByAddr := make(map[string]map[uint64]bool)
+	// Value could be distributed here, but distribution waits until constructAssetTxn()
+ return
+}
- // 2) For each participant account, opt-in up to proto.MaxAssetsPerAccount assets of all other participant accounts
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
+const totalSupply = 10_000_000_000_000_000
+
+func (pps *WorkerState) makeNewAssets(client *libgoal.Client) (err error) {
+ if len(pps.cinfo.AssetParams) >= int(pps.cfg.NumAsset) {
+ return
+ }
+	assetsNeeded := int(pps.cfg.NumAsset) - len(pps.cinfo.AssetParams)
+	// decrement a copy so assetsNeeded still holds the target count
+	// for the wait loop below
+	remaining := assetsNeeded
+	newAssetAddrs := make(map[string]*pingPongAccount, assetsNeeded)
+	for addr, acct := range pps.accounts {
+		if remaining <= 0 {
+			break
+		}
+		remaining--
+ var meta [32]byte
+ crypto.RandBytes(meta[:])
+ assetName := fmt.Sprintf("pong%d_%d", len(pps.cinfo.AssetParams), rand.Intn(8999)+1000)
if !pps.cfg.Quiet {
- fmt.Printf("Opting to account %v\n", addr)
+ fmt.Printf("Creating asset %s\n", assetName)
}
-
- acct, addrErr := client.AccountInformation(addr)
- if addrErr != nil {
- fmt.Printf("Cannot lookup optin account\n")
- err = addrErr
+ tx, createErr := client.MakeUnsignedAssetCreateTx(totalSupply, false, addr, addr, addr, addr, "ping", assetName, "", meta[:], 0)
+ if createErr != nil {
+ fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
+ err = createErr
return
}
- maxAssetsPerAccount := proto.MaxAssetsPerAccount
- // TODO : given that we've added unlimited asset support, we should revise this
- // code so that we'll have control on how many asset/account we want to create.
- // for now, I'm going to keep the previous max values until we have refactored this code.
- if maxAssetsPerAccount == 0 {
- maxAssetsPerAccount = config.Consensus[protocol.ConsensusV30].MaxAssetsPerAccount
- }
- numSlots := maxAssetsPerAccount - len(acct.Assets)
- optInsByAddr[addr] = make(map[uint64]bool)
- for k, creator := range allAssets {
- if creator == addr {
- continue
- }
- // do we have any more asset slots for this?
- if numSlots <= 0 {
- break
- }
- numSlots--
-
- // opt-in asset k for addr
- tx, sendErr := client.MakeUnsignedAssetSendTx(k, 0, addr, "", "")
- if sendErr != nil {
- fmt.Printf("Cannot initiate asset optin %v in account %v\n", k, addr)
- err = sendErr
- return
- }
-
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill asset optin %v in account %v\n", k, addr)
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
-
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[addr], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err)
- return
- }
- optIns[k] = append(optIns[k], addr)
- optInsByAddr[addr][k] = true
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill asset creation txn\n")
+ return
}
- }
-
- // wait until all opt-ins completed
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- for addr := range accounts {
- if addr == pps.cfg.SrcAccount {
- continue
+ tx.Note = pps.makeNextUniqueNoteField()
+ pps.schedule(1)
+ _, err = signAndBroadcastTransaction(pps.accounts[addr], tx, client)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err)
+ return
}
- expectedAssets := numCreatedAssetsByAddr[addr] + len(optInsByAddr[addr])
- var account v1.Account
- deadline := time.Now().Add(3 * time.Minute)
- for {
- account, err = client.AccountInformation(addr)
+ newAssetAddrs[addr] = acct
+ }
+ // wait for new assets to be created, fetch account data for them
+ newAssets := make(map[uint64]v1.AssetParams, assetsNeeded)
+ timeout := time.Now().Add(10 * time.Second)
+ for len(newAssets) < assetsNeeded {
+ for addr, acct := range newAssetAddrs {
+ // TODO: switch to v2 API
+ ai, err := client.AccountInformation(addr)
if err != nil {
- fmt.Printf("Warning: cannot lookup source account after assets opt in")
+				fmt.Printf("Warning: cannot lookup source account after assets creation\n")
time.Sleep(1 * time.Second)
continue
}
- if len(account.Assets) == expectedAssets {
- break
- } else if len(account.Assets) > expectedAssets {
- err = fmt.Errorf("account %v has too many assets %d > %d ", addr, len(account.Assets), expectedAssets)
- return
- }
-
- if time.Now().After(deadline) {
- err = fmt.Errorf("asset opting in took too long")
- fmt.Printf("Error: %s\n", err.Error())
- return
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- }
- }
-
- // Step 3) Evenly distribute the assets across all opted-in accounts
- for k, creator := range allAssets {
- if !pps.cfg.Quiet {
- fmt.Printf("Distributing asset %+v from account %v\n", k, creator)
- }
- creatorAccount, creatorErr := client.AccountInformation(creator)
- if creatorErr != nil {
- fmt.Printf("Cannot lookup source account\n")
- err = creatorErr
- return
- }
- assetParams := creatorAccount.AssetParams
-
- for _, addr := range optIns[k] {
- assetAmt := assetParams[k].Total / uint64(len(optIns[k]))
- if !pps.cfg.Quiet {
- fmt.Printf("Distributing assets from %v to %v \n", creator, addr)
- }
-
- tx, sendErr := client.MakeUnsignedAssetSendTx(k, assetAmt, addr, "", "")
- if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", sendErr)
- err = fmt.Errorf("error making unsigned asset send tx : %w", sendErr)
- return
- }
- tx.Note = pps.makeNextUniqueNoteField()
- tx, sendErr = client.FillUnsignedTxTemplate(creator, 0, 0, pps.cfg.MaxFee, tx)
- if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", sendErr)
- err = fmt.Errorf("error making unsigned asset send tx : %w", sendErr)
- return
- }
- tx.LastValid = tx.FirstValid + 5
- if pps.cfg.MaxFee == 0 {
- var suggestedFee uint64
- suggestedFee, err = client.SuggestedFee()
- if err != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error retrieving suggestedFee: %v\n", err)
- return
- }
- if suggestedFee > tx.Fee.Raw {
- tx.Fee.Raw = suggestedFee
+ for assetID, ap := range ai.AssetParams {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ _, has := pps.cinfo.AssetParams[assetID]
+ if !has {
+ newAssets[assetID] = ap
}
}
-
- schedule(pps.cfg.TxnPerSec, &nextSendTime)
- _, err = signAndBroadcastTransaction(accounts[creator], tx, client)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err)
- return
+ for assetID, holding := range ai.Assets {
+ pps.cinfo.OptIns[assetID] = uniqueAppend(pps.cinfo.OptIns[assetID], addr)
+ if acct.holdings == nil {
+ acct.holdings = make(map[uint64]uint64)
+ }
+ acct.holdings[assetID] = holding.Amount
}
}
- // append the asset to the result assets
- resultAssetMaps[k] = assetParams[k]
- }
-
- // wait for all transfers acceptance
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- deadline := time.Now().Add(3 * time.Minute)
- var pending v1.PendingTransactions
- for {
- pending, err = client.GetPendingTransactions(100)
- if err != nil {
- fmt.Printf("Warning: cannot get pending txn")
- time.Sleep(1 * time.Second)
- continue
- }
- if pending.TotalTxns == 0 {
+ if time.Now().After(timeout) {
+ // complain, but try to keep running on what assets we have
+			log.Printf("WARNING: took too long to create new assets")
+ // TODO: error?
break
}
- if time.Now().After(deadline) {
- fmt.Printf("Warning: assets distribution took too long")
- break
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
}
- return
+ for assetID, ap := range newAssets {
+ pps.cinfo.AssetParams[assetID] = ap
+ }
+ return nil
}
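
Note: the creation wait above is a poll-until-deadline loop that degrades gracefully instead of failing hard. Stripped of the account bookkeeping, its shape is roughly (done is a stand-in for the completion check):

    deadline := time.Now().Add(10 * time.Second)
    for !done() {
        if time.Now().After(deadline) {
            log.Printf("WARNING: gave up waiting; continuing with what we have")
            break
        }
        time.Sleep(time.Second)
    }
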
-func signAndBroadcastTransaction(senderAccount *pingPongAccount, tx transactions.Transaction, client libgoal.Client) (txID string, err error) {
+func signAndBroadcastTransaction(senderAccount *pingPongAccount, tx transactions.Transaction, client *libgoal.Client) (txID string, err error) {
signedTx := tx.Sign(senderAccount.sk)
txID, err = client.BroadcastTransaction(signedTx)
if err != nil {
@@ -581,7 +596,7 @@ func genAppProgram(numOps uint32, numHashes uint32, hashSize string, numGlobalKe
return ops.Program, progAsm
}
-func waitForNextRoundOrSleep(client libgoal.Client, waitTime time.Duration) {
+func waitForNextRoundOrSleep(client *libgoal.Client, waitTime time.Duration) {
status, err := client.Status()
if err == nil {
status, err = client.WaitForRound(status.LastRound)
@@ -592,7 +607,7 @@ func waitForNextRoundOrSleep(client libgoal.Client, waitTime time.Duration) {
time.Sleep(waitTime)
}
-func (pps *WorkerState) sendAsGroup(txgroup []transactions.Transaction, client libgoal.Client, senders []string) (err error) {
+func (pps *WorkerState) sendAsGroup(txgroup []transactions.Transaction, client *libgoal.Client, senders []string) (err error) {
if len(txgroup) == 0 {
err = fmt.Errorf("sendAsGroup: empty group")
return
@@ -624,7 +639,7 @@ repeat:
var proto *config.ConsensusParams
-func getProto(client libgoal.Client) (config.ConsensusParams, error) {
+func getProto(client *libgoal.Client) (config.ConsensusParams, error) {
if proto == nil {
var err error
status, err := client.Status()
@@ -641,207 +656,136 @@ func getProto(client libgoal.Client) (config.ConsensusParams, error) {
return *proto, nil
}
-func (pps *WorkerState) prepareApps(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) (appParams map[uint64]v1.AppParams, optIns map[uint64][]string, err error) {
- proto, err := getProto(client)
- if err != nil {
- return
+// prepareApps ensures that each of the cfg.NumPartAccounts accounts is opted in to cfg.NumAppOptIn apps chosen from the cfg.NumApp total
+func (pps *WorkerState) prepareApps(client *libgoal.Client) (err error) {
+ if pps.cinfo.AppParams == nil {
+ pps.cinfo.AppParams = make(map[uint64]v1.AppParams)
}
- toCreate := int(cfg.NumApp)
- appsPerAcct := proto.MaxAppsCreated
- // TODO : given that we've added unlimited app support, we should revise this
- // code so that we'll have control on how many app/account we want to create.
- // for now, I'm going to keep the previous max values until we have refactored this code.
- if appsPerAcct == 0 {
- appsPerAcct = config.Consensus[protocol.ConsensusV30].MaxAppsCreated
+ if pps.cinfo.OptIns == nil {
+ pps.cinfo.OptIns = make(map[uint64][]string, pps.cfg.NumAsset+pps.cfg.NumApp)
}
- // create min(groupSize, maxAppsPerAcct) per account to optimize sending in batches
- groupSize := proto.MaxTxGroupSize
- if appsPerAcct > groupSize {
- appsPerAcct = groupSize
- }
-
- acctNeeded := toCreate / appsPerAcct
- if toCreate%appsPerAcct != 0 {
- acctNeeded++
- }
- if acctNeeded >= len(accounts) { // >= because cfg.SrcAccount is skipped
- err = fmt.Errorf("need %d accts to create %d apps but got only %d accts", acctNeeded, toCreate, len(accounts))
- return
- }
- maxOptIn := uint32(config.Consensus[protocol.ConsensusCurrentVersion].MaxAppsOptedIn)
- if maxOptIn > 0 && cfg.NumAppOptIn > maxOptIn {
- err = fmt.Errorf("each acct can only opt in to %d but %d requested", maxOptIn, cfg.NumAppOptIn)
- return
- }
-
- appAccounts := make([]v1.Account, len(accounts))
- accountsCount := 0
- for acctAddr := range accounts {
- if acctAddr == cfg.SrcAccount {
- continue
+ // generate new apps
+ var txgroup []transactions.Transaction
+ var senders []string
+ for addr, acct := range pps.accounts {
+ if len(pps.cinfo.AppParams) >= int(pps.cfg.NumApp) {
+ break
}
- appAccounts[accountsCount], err = client.AccountInformation(acctAddr)
+ var tx transactions.Transaction
+ tx, err = pps.newApp(addr, client)
if err != nil {
- fmt.Printf("Warning, cannot lookup acctAddr account %s", acctAddr)
return
}
- accountsCount++
- if accountsCount == acctNeeded {
- break
+ acct.addBalance(-int64(pps.cfg.MaxFee))
+ txgroup = append(txgroup, tx)
+ senders = append(senders, addr)
+ if len(txgroup) == int(pps.cfg.GroupSize) {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
+ }
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
}
- appAccounts = appAccounts[:accountsCount]
-
- if !cfg.Quiet {
- fmt.Printf("Selected temp account:\n")
- for _, acct := range appAccounts {
- fmt.Printf("%s\n", acct.Address)
+ if len(txgroup) > 0 {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
}
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
- // generate app program with roughly some number of operations
- prog, asm := genAppProgram(cfg.AppProgOps, cfg.AppProgHashes, cfg.AppProgHashSize, cfg.AppGlobKeys, cfg.AppLocalKeys)
- if !cfg.Quiet {
- fmt.Printf("generated program: \n%s\n", asm)
- }
- globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
- locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
-
- // for each account, store the number of expected applications.
- accountsApplicationCount := make(map[string]int)
-
- // create apps
- for idx, appAccount := range appAccounts {
- begin := idx * appsPerAcct
- end := (idx + 1) * appsPerAcct
- if end > toCreate {
- end = toCreate
- }
-
- var txgroup []transactions.Transaction
- var senders []string
- for i := begin; i < end; i++ {
+ // opt-in more accounts to apps
+ acctPerApp := (pps.cfg.NumAppOptIn * pps.cfg.NumPartAccounts) / pps.cfg.NumApp
+ for appid := range pps.cinfo.AppParams {
+		optins := pps.cinfo.OptIns[appid]
+		for addr, acct := range pps.accounts {
+			if len(optins) >= int(acctPerApp) {
+				break
+			}
+			n := len(optins)
+			optins = uniqueAppend(optins, addr) // track so acctPerApp is honored
+			if len(optins) == n {
+				continue // already opted in
+			}
+			pps.cinfo.OptIns[appid] = optins
+			// opt-in the account to the app
var tx transactions.Transaction
-
- tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ tx, err = pps.appOptIn(addr, appid, client)
if err != nil {
- fmt.Printf("Cannot create app txn\n")
- panic(err)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ return
}
-
- tx, err = client.FillUnsignedTxTemplate(appAccount.Address, 0, 0, cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill app creation txn\n")
- panic(err)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ acct.addBalance(-int64(pps.cfg.MaxFee))
+ txgroup = append(txgroup, tx)
+ senders = append(senders, addr)
+ if len(txgroup) == int(pps.cfg.GroupSize) {
+ pps.schedule(len(txgroup))
+ err = pps.sendAsGroup(txgroup, client, senders)
+ if err != nil {
+ return
+ }
+ txgroup = txgroup[:0]
+ senders = senders[:0]
}
- // Ensure different txids
- tx.Note = pps.makeNextUniqueNoteField()
-
- txgroup = append(txgroup, tx)
- accounts[appAccount.Address].addBalance(-int64(tx.Fee.Raw))
- senders = append(senders, appAccount.Address)
- accountsApplicationCount[appAccount.Address]++
}
-
+ }
+ if len(txgroup) > 0 {
+ pps.schedule(len(txgroup))
err = pps.sendAsGroup(txgroup, client, senders)
if err != nil {
- balance, err2 := client.GetBalance(appAccount.Address)
- if err2 == nil {
- fmt.Printf("account %v balance is %d, logged balance is %d\n", appAccount.Address, balance, accounts[appAccount.Address].getBalance())
- } else {
- fmt.Printf("account %v balance cannot be determined : %v\n", appAccount.Address, err2)
- }
return
}
- if !cfg.Quiet {
- fmt.Printf("Created new %d apps\n", len(txgroup))
- }
}
+ return
+}
- // get these apps
- var aidxs []uint64
- appParams = make(map[uint64]v1.AppParams)
- for _, appAccount := range appAccounts {
- var account v1.Account
- for {
- account, err = client.AccountInformation(appAccount.Address)
- if err != nil {
- fmt.Printf("Warning, cannot lookup source account")
- return
- }
- if len(account.AppParams) >= accountsApplicationCount[appAccount.Address] {
- break
- }
- waitForNextRoundOrSleep(client, 500*time.Millisecond)
- // TODO : if we fail here for too long, we should re-create new accounts, etc.
- }
- for idx, v := range account.AppParams {
- appParams[idx] = v
- aidxs = append(aidxs, idx)
- }
+func (pps *WorkerState) newApp(addr string, client *libgoal.Client) (tx transactions.Transaction, err error) {
+ // generate app program with roughly some number of operations
+ prog, asm := genAppProgram(pps.cfg.AppProgOps, pps.cfg.AppProgHashes, pps.cfg.AppProgHashSize, pps.cfg.AppGlobKeys, pps.cfg.AppLocalKeys)
+ if !pps.cfg.Quiet {
+ fmt.Printf("generated program: \n%s\n", asm)
}
- if len(aidxs) != len(appParams) {
- err = fmt.Errorf("duplicates in aidxs, %d != %d", len(aidxs), len(appParams))
- return
+ globSchema := basics.StateSchema{NumByteSlice: proto.MaxGlobalSchemaEntries}
+ locSchema := basics.StateSchema{NumByteSlice: proto.MaxLocalSchemaEntries}
+
+ tx, err = client.MakeUnsignedAppCreateTx(transactions.NoOpOC, prog, prog, globSchema, locSchema, nil, nil, nil, nil, 0)
+ if err != nil {
+ fmt.Printf("Cannot create app txn\n")
+ panic(err)
+ // TODO : if we fail here for too long, we should re-create new accounts, etc.
}
- // time to opt in to these apps
- if cfg.NumAppOptIn > 0 {
- optIns = make(map[uint64][]string)
- for addr := range accounts {
- if addr == cfg.SrcAccount {
- continue
- }
- var txgroup []transactions.Transaction
- var senders []string
- permAppIndices := rand.Perm(len(aidxs))
- for i := uint32(0); i < cfg.NumAppOptIn; i++ {
- j := permAppIndices[i]
- aidx := aidxs[j]
- var tx transactions.Transaction
- tx, err = client.MakeUnsignedAppOptInTx(aidx, nil, nil, nil, nil)
- if err != nil {
- fmt.Printf("Cannot create app txn\n")
- panic(err)
- }
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+ fmt.Printf("Cannot fill app creation txn\n")
+ panic(err)
+ // TODO : if we fail here for too long, we should re-create new accounts, etc.
+ }
- tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, cfg.MaxFee, tx)
- if err != nil {
- fmt.Printf("Cannot fill app creation txn\n")
- panic(err)
- }
+ // Ensure different txids
+ tx.Note = pps.makeNextUniqueNoteField()
- // Ensure different txids
- tx.Note = pps.makeNextUniqueNoteField()
+ return tx, err
+}
- optIns[aidx] = append(optIns[aidx], addr)
-
- txgroup = append(txgroup, tx)
- senders = append(senders, addr)
- if len(txgroup) == groupSize {
- err = pps.sendAsGroup(txgroup, client, senders)
- if err != nil {
- return
- }
- txgroup = txgroup[:0]
- senders = senders[:0]
- }
- }
- // broadcast leftovers
- if len(txgroup) > 0 {
- err = pps.sendAsGroup(txgroup, client, senders)
- if err != nil {
- return
- }
- }
- }
+func (pps *WorkerState) appOptIn(addr string, appID uint64, client *libgoal.Client) (tx transactions.Transaction, err error) {
+ tx, err = client.MakeUnsignedAppOptInTx(appID, nil, nil, nil, nil)
+ if err != nil {
+		fmt.Printf("Cannot create app optin txn\n")
+ panic(err)
}
+ tx, err = client.FillUnsignedTxTemplate(addr, 0, 0, pps.cfg.MaxFee, tx)
+ if err != nil {
+		fmt.Printf("Cannot fill app optin txn\n")
+ panic(err)
+ }
+
+ // Ensure different txids
+ tx.Note = pps.makeNextUniqueNoteField()
return
}
@@ -872,17 +816,28 @@ func takeTopAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32
return
}
-func generateAccounts(allAccounts map[string]*pingPongAccount, numAccounts uint32) {
+// generate random ephemeral accounts
+// TODO: don't do this and _always_ use the deterministic account mechanism?
+func (pps *WorkerState) generateAccounts() {
var seed crypto.Seed
- for accountsRequired := int(numAccounts+1) - len(allAccounts); accountsRequired > 0; accountsRequired-- {
+ for accountsRequired := int(pps.cfg.NumPartAccounts+1) - len(pps.accounts); accountsRequired > 0; accountsRequired-- {
crypto.RandBytes(seed[:])
privateKey := crypto.GenerateSignatureSecrets(seed)
publicKey := basics.Address(privateKey.SignatureVerifier)
- allAccounts[publicKey.String()] = &pingPongAccount{
+ pps.accounts[publicKey.String()] = &pingPongAccount{
sk: privateKey,
pk: publicKey,
}
}
}
+
+func uniqueAppend(they []string, x string) []string {
+ for _, v := range they {
+ if v == x {
+ return they
+ }
+ }
+ return append(they, x)
+}
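
Note: uniqueAppend keeps the per-creatable opt-in lists duplicate-free with a linear scan; for the short lists involved this is cheaper than a map-backed set and preserves insertion order. Usage sketch:

    optins := []string{"A", "B"}
    optins = uniqueAppend(optins, "A") // unchanged: "A" already present
    optins = uniqueAppend(optins, "C") // now ["A", "B", "C"]
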
diff --git a/shared/pingpong/accounts_test.go b/shared/pingpong/accounts_test.go
new file mode 100644
index 0000000000..7f2f0a737b
--- /dev/null
+++ b/shared/pingpong/accounts_test.go
@@ -0,0 +1,60 @@
+// Copyright (C) 2019-2022 Algorand, Inc.
+// This file is part of go-algorand
+//
+// go-algorand is free software: you can redistribute it and/or modify
+// it under the terms of the GNU Affero General Public License as
+// published by the Free Software Foundation, either version 3 of the
+// License, or (at your option) any later version.
+//
+// go-algorand is distributed in the hope that it will be useful,
+// but WITHOUT ANY WARRANTY; without even the implied warranty of
+// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+// GNU Affero General Public License for more details.
+//
+// You should have received a copy of the GNU Affero General Public License
+// along with go-algorand.  If not, see <https://www.gnu.org/licenses/>.
+
+package pingpong
+
+import (
+ "encoding/binary"
+ "testing"
+
+ "github.com/algorand/go-algorand/crypto"
+ "github.com/algorand/go-algorand/data/basics"
+ "github.com/stretchr/testify/assert"
+)
+
+func makeKeyFromSeed(i uint64) *crypto.SignatureSecrets {
+ var seed crypto.Seed
+ binary.LittleEndian.PutUint64(seed[:], i)
+ s := crypto.GenerateSignatureSecrets(seed)
+ return s
+}
+
+func TestDeterministicAccounts(t *testing.T) {
+ initCfg := PpConfig{
+ NumPartAccounts: 20,
+ DeterministicKeys: true,
+ GeneratedAccountsCount: 100,
+ }
+
+	// create the expected set of keys the same way netgoal generate --deterministic does
+ expectedPubKeys := make(map[crypto.PublicKey]*crypto.SignatureSecrets)
+ for i := 0; i < int(initCfg.GeneratedAccountsCount); i++ {
+ key := makeKeyFromSeed(uint64(i))
+ expectedPubKeys[key.SignatureVerifier] = key
+ }
+ assert.Len(t, expectedPubKeys, int(initCfg.GeneratedAccountsCount))
+
+ // call pingpong acct generator and assert its separately-generated secrets are equal
+ accountSecrets := deterministicAccounts(initCfg)
+ cnt := 0
+ for secret := range accountSecrets {
+ t.Log("Got address", basics.Address(secret.SignatureVerifier))
+ assert.Contains(t, expectedPubKeys, secret.SignatureVerifier)
+ assert.Equal(t, *expectedPubKeys[secret.SignatureVerifier], *secret)
+ cnt++
+ }
+ assert.Equal(t, int(initCfg.NumPartAccounts), cnt)
+}
diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go
index db6cbb4ed1..5b9224c017 100644
--- a/shared/pingpong/config.go
+++ b/shared/pingpong/config.go
@@ -18,6 +18,7 @@ package pingpong
import (
"encoding/json"
+ "fmt"
"io"
"os"
"time"
@@ -30,6 +31,7 @@ const ConfigFilename = "ppconfig.json"
 // PpConfig defines configuration structure for pingpong
type PpConfig struct {
+ // SrcAccount is address to use as funding source for new accounts
SrcAccount string
RandomizeFee bool
RandomizeAmt bool
@@ -45,12 +47,19 @@ type PpConfig struct {
Quiet bool
RandomNote bool
RandomLease bool
- Program []byte
- LogicArgs [][]byte
- GroupSize uint32
- NumAsset uint32
+
+ Program []byte
+ LogicArgs [][]byte
+ ProgramProbability float64
+
+ GroupSize uint32
+ // NumAsset is the number of assets each account holds
+ NumAsset uint32
+	// MinAccountAsset is the minimum number of units of each asset that a participant account should hold
MinAccountAsset uint64
- NumApp uint32
+ // NumApp is the total number of apps to create
+ NumApp uint32
+ // NumAppOptIn is the number of apps each account opts in to
NumAppOptIn uint32
AppProgOps uint32
AppProgHashes uint32
@@ -64,6 +73,18 @@ type PpConfig struct {
NftAsaPerSecond uint32 // e.g. 100
NftAsaPerAccount uint32 // 0..999
NftAsaAccountInFlight uint32
+
+ // configuration related to using bootstrapped ledgers built by netgoal
+ // TODO: support generatedAssetsCount, generatedApplicationCount
+ DeterministicKeys bool
+ GeneratedAccountsCount uint32
+ GeneratedAccountSampleMethod string
+ GeneratedAccountsOffset uint32
+
+ WeightPayment float64
+ WeightAsset float64
+ WeightApp float64
+ WeightNFTCreation float64
}
// DefaultConfig object for Ping Pong
@@ -78,7 +99,7 @@ var DefaultConfig = PpConfig{
TxnPerSec: 200,
NumPartAccounts: 10,
RunTime: 10 * time.Second,
- RefreshTime: 10 * time.Second,
+ RefreshTime: 3600 * time.Second,
MinAccountFunds: 100000,
GroupSize: 1,
NumAsset: 0,
@@ -90,6 +111,8 @@ var DefaultConfig = PpConfig{
Rekey: false,
MaxRuntime: 0,
+ ProgramProbability: 1,
+
NftAsaAccountInFlight: 5,
NftAsaPerAccount: 900,
}
@@ -125,3 +148,50 @@ func (cfg PpConfig) Dump(stream io.Writer) {
enc := codecs.NewFormattedJSONEncoder(stream)
enc.Encode(cfg)
}
+
+// SetDefaultWeights ensures a reasonable configuration of traffic generation weights.
+// With no weights set, and old args about what mode to run, each activated traffic type gets a weight of 1.
+// With no weights set and some activated traffic type other than payment, payment gets deactivated (zero weight) to maintain compatibility with prior behavior.
+// WeightPayment must be explicitly set to add payment back into the mix when other modes are activated.
+func (cfg *PpConfig) SetDefaultWeights() {
+ const epsilon = 0.0000001
+ if cfg.WeightPayment+cfg.WeightAsset+cfg.WeightApp+cfg.WeightNFTCreation < epsilon {
+ // set up some sensible run probability weights
+ if cfg.NumAsset > 0 && cfg.WeightAsset < epsilon {
+ cfg.WeightAsset = 1
+ }
+ if cfg.NumApp > 0 && cfg.WeightApp < epsilon {
+ cfg.WeightApp = 1
+ }
+ if cfg.NftAsaPerSecond > 0 && cfg.WeightNFTCreation < epsilon {
+ cfg.WeightNFTCreation = 1
+ }
+ if cfg.NumAsset == 0 && cfg.NumApp == 0 && cfg.NftAsaPerSecond == 0 && cfg.WeightPayment < epsilon {
+		// backwards compatibility: when another mode is specified, payment traffic stays off, so only set the payment weight when no mode is specified
+ cfg.WeightPayment = 1
+ }
+ }
+}
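
A sketch of the defaulting behavior (values per the logic above, not output from a real run):

    cfg := PpConfig{NumAsset: 4}
    cfg.SetDefaultWeights()
    // cfg.WeightAsset == 1
    // cfg.WeightPayment == 0: an explicit mode was requested, so payment
    // stays off unless WeightPayment is set, matching prior behavior
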
+
+var accountSampleMethods = []string{
+ "",
+ "random",
+ "sequential",
+}
+
+// Check returns an error if config is invalid.
+func (cfg *PpConfig) Check() error {
+ sampleOk := false
+ for _, v := range accountSampleMethods {
+ if v == cfg.GeneratedAccountSampleMethod {
+ sampleOk = true
+ break
+ }
+ }
+ if !sampleOk {
+ return fmt.Errorf("unknown GeneratedAccountSampleMethod: %s", cfg.GeneratedAccountSampleMethod)
+ }
+ if cfg.DeterministicKeys && (cfg.GeneratedAccountsOffset+cfg.NumPartAccounts > cfg.GeneratedAccountsCount) {
+ return fmt.Errorf("(GeneratedAccountsOffset %d) + (NumPartAccounts %d) > (GeneratedAccountsCount %d)", cfg.GeneratedAccountsOffset, cfg.NumPartAccounts, cfg.GeneratedAccountsCount)
+ }
+ return nil
+}
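
For example, the deterministic-keys bound rejects a sampling window that runs past the generated set (a sketch):

    cfg := PpConfig{
        DeterministicKeys:       true,
        GeneratedAccountsCount:  100,
        GeneratedAccountsOffset: 90,
        NumPartAccounts:         20,
    }
    err := cfg.Check() // error: (90 + 20) > 100
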
diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go
index 1592ba6c9f..e0ee6812a7 100644
--- a/shared/pingpong/pingpong.go
+++ b/shared/pingpong/pingpong.go
@@ -19,11 +19,13 @@ package pingpong
import (
"context"
"encoding/binary"
+ "errors"
"fmt"
"math"
"math/rand"
"os"
"strings"
+ "sync/atomic"
"time"
"github.com/algorand/go-deadlock"
@@ -36,7 +38,6 @@ import (
"github.com/algorand/go-algorand/data/transactions/logic"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/protocol"
- "github.com/algorand/go-algorand/util"
)
// CreatablesInfo has information about created assets, apps and opting in
@@ -49,145 +50,146 @@ type CreatablesInfo struct {
// pingPongAccount represents the account state for each account in the pingpong application
// This includes the current balance and public/private keys tied to the account
type pingPongAccount struct {
+ balance uint64
+ balanceRound uint64
+
deadlock.Mutex
sk *crypto.SignatureSecrets
pk basics.Address
- balance uint64
- balanceRound uint64
+ // asset holdings
+ holdings map[uint64]uint64
}
func (ppa *pingPongAccount) getBalance() uint64 {
- ppa.Lock()
- defer ppa.Unlock()
- return ppa.balance
+ return atomic.LoadUint64(&ppa.balance)
}
func (ppa *pingPongAccount) setBalance(balance uint64) {
+ atomic.StoreUint64(&ppa.balance, balance)
+}
+
+func (ppa *pingPongAccount) addBalance(offset int64) {
+ if offset >= 0 {
+ atomic.AddUint64(&ppa.balance, uint64(offset))
+ return
+ }
+ for {
+ v := atomic.LoadUint64(&ppa.balance)
+ nv := v - uint64(-offset)
+ done := atomic.CompareAndSwapUint64(&ppa.balance, v, nv)
+ if done {
+ return
+ }
+ }
+}
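
Note: the CAS loop covers the negative case; the sync/atomic documentation also gives a direct subtraction idiom that would avoid the loop, at the cost of being more cryptic (a sketch):

    // subtract c from an unsigned counter: AddUint64(&x, ^uint64(c-1))
    c := uint64(-offset)
    atomic.AddUint64(&ppa.balance, ^(c - 1))
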
+
+func (ppa *pingPongAccount) getAsset(aid uint64) (v uint64, ok bool) {
+ ppa.Lock()
+ defer ppa.Unlock()
+ v, ok = ppa.holdings[aid]
+ return
+}
+func (ppa *pingPongAccount) setAsset(aid, value uint64) {
+ ppa.Lock()
+ defer ppa.Unlock()
+ ppa.holdings[aid] = value
+}
+func (ppa *pingPongAccount) addAsset(aid uint64, dv int64) {
ppa.Lock()
defer ppa.Unlock()
- ppa.balance = balance
+ v := ppa.holdings[aid]
+ if dv >= 0 {
+ v += uint64(dv)
+ } else {
+ v -= uint64(-dv)
+ }
+ ppa.holdings[aid] = v
}
-func (ppa *pingPongAccount) addBalance(offset int64) {
+func (ppa *pingPongAccount) String() string {
ppa.Lock()
defer ppa.Unlock()
- ppa.balance = uint64(int64(ppa.balance) + offset)
+ var ow strings.Builder
+ fmt.Fprintf(&ow, "%s %d", ppa.pk.String(), ppa.balance)
+ if len(ppa.holdings) > 0 {
+ fmt.Fprintf(&ow, "[")
+ first := true
+ for assetID, av := range ppa.holdings {
+ if first {
+ first = false
+ } else {
+ fmt.Fprintf(&ow, ", ")
+ }
+ fmt.Fprintf(&ow, "a%d=%d", assetID, av)
+ }
+ fmt.Fprintf(&ow, "]")
+ }
+ return ow.String()
}
// WorkerState object holds a running pingpong worker
type WorkerState struct {
- cfg PpConfig
- accounts map[string]*pingPongAccount
- accountsMu deadlock.RWMutex
- cinfo CreatablesInfo
+ cfg PpConfig
+ accounts map[string]*pingPongAccount
+ cinfo CreatablesInfo
nftStartTime int64
localNftIndex uint64
nftHolders map[string]int
incTransactionSalt uint64
- muSuggestedParams deadlock.Mutex
- suggestedParams v1.TransactionParams
- pendingTxns v1.PendingTransactions
+ nextSendTime time.Time
+ scheduleActionTime time.Duration
+ scheduleCalls uint64
+ scheduleSteps uint64
+
+ refreshAddrs []string
+ refreshPos int
+
+ client *libgoal.Client
}
// PrepareAccounts to set up accounts and asset accounts required for Ping Pong run
-func (pps *WorkerState) PrepareAccounts(ac libgoal.Client) (err error) {
- pps.accounts, pps.cfg, err = pps.ensureAccounts(ac, pps.cfg)
+func (pps *WorkerState) PrepareAccounts(ac *libgoal.Client) (err error) {
+ pps.client = ac
+ pps.nextSendTime = time.Now()
+ durPerTxn := time.Second / time.Duration(pps.cfg.TxnPerSec)
+ fmt.Printf("duration per txn %s\n", durPerTxn)
+
+ err = pps.ensureAccounts(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "ensure accounts failed %v\n", err)
return
}
- cfg := pps.cfg
- if cfg.NumAsset > 0 {
- // zero out max amount for asset transactions
- cfg.MaxAmt = 0
+ // create new ephemeral random accounts
+ pps.generateAccounts()
- var assetAccounts map[string]*pingPongAccount
- assetAccounts, err = pps.prepareNewAccounts(ac)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
- return
- }
+ err = pps.fundAccounts(ac)
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
+ return
+ }
- pps.cinfo.AssetParams, pps.cinfo.OptIns, err = pps.prepareAssets(assetAccounts, ac)
+ if pps.cfg.NumAsset > 0 {
+ err = pps.prepareAssets(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "prepare assets failed %v\n", err)
return
}
-
- if !cfg.Quiet {
- for addr := range assetAccounts {
- if addr != pps.cfg.SrcAccount {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].getBalance())
- }
- }
- }
- } else if cfg.NumApp > 0 {
- var appAccounts map[string]*pingPongAccount
- appAccounts, err = pps.prepareNewAccounts(ac)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "prepare new accounts failed: %v\n", err)
- return
- }
- pps.cinfo.AppParams, pps.cinfo.OptIns, err = pps.prepareApps(appAccounts, ac, cfg)
- if err != nil {
- return
- }
- if !cfg.Quiet {
- for addr := range appAccounts {
- if addr != pps.cfg.SrcAccount {
- fmt.Printf("final prepareAccounts, account addr: %s, balance: %d\n", addr, pps.accounts[addr].getBalance())
- }
- }
- }
- } else {
- // If we have more accounts than requested, pick the top N (not including src)
- if len(pps.accounts) > int(cfg.NumPartAccounts+1) {
- fmt.Printf("Finding the richest %d accounts to use for transacting\n", cfg.NumPartAccounts)
- pps.accounts = takeTopAccounts(pps.accounts, cfg.NumPartAccounts, cfg.SrcAccount)
- } else {
- // Not enough accounts yet (or just enough). Create more if needed
- fmt.Printf("Not enough accounts - creating %d more\n", int(cfg.NumPartAccounts+1)-len(pps.accounts))
- generateAccounts(pps.accounts, cfg.NumPartAccounts)
- }
-
- err = pps.fundAccounts(pps.accounts, ac, cfg)
+ }
+ if pps.cfg.NumApp > 0 {
+ err = pps.prepareApps(ac)
if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
return
}
- go pps.roundMonitor(ac)
}
-
- pps.cfg = cfg
- return
-}
-
-func (pps *WorkerState) prepareNewAccounts(client libgoal.Client) (newAccounts map[string]*pingPongAccount, err error) {
- // create new accounts for testing
- newAccounts = make(map[string]*pingPongAccount)
- generateAccounts(newAccounts, pps.cfg.NumPartAccounts)
- // copy the source account, as needed.
- if srcAcct, has := pps.accounts[pps.cfg.SrcAccount]; has {
- newAccounts[pps.cfg.SrcAccount] = srcAcct
- }
- pps.accounts = newAccounts
-
- err = pps.fundAccounts(newAccounts, client, pps.cfg)
- if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "fund accounts failed %v\n", err)
- return
- }
-
- go pps.roundMonitor(client)
return
}
// determine the min balance per participant account
-func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequiredBalance uint64, runningRequiredBalance uint64, err error) {
+func computeAccountMinBalance(client *libgoal.Client, cfg PpConfig) (fundingRequiredBalance uint64, runningRequiredBalance uint64, err error) {
proto, err := getProto(client)
if err != nil {
return
@@ -207,17 +209,6 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
fee *= uint64(cfg.GroupSize)
}
- if cfg.NumApp > 0 {
- amount := uint64(0)
-
- runningRequiredBalance = (amount + fee) * 10 * 2
- setupCost := uint64(proto.MaxTxGroupSize) * (uint64(proto.AppFlatParamsMinBalance*2) + fee)
- // todo: add the cfg.NumAppOptIn to the setup cost.
- fundingRequiredBalance = proto.MinBalance + cfg.MinAccountFunds + (amount+fee)*10*2*cfg.TxnPerSec*uint64(math.Ceil(cfg.RefreshTime.Seconds())) + setupCost
- fmt.Printf("required min balance for app accounts: %d\n", fundingRequiredBalance)
- return
- }
-
fundingRequiredBalance = minActiveAccountBalance
runningRequiredBalance = minActiveAccountBalance
@@ -263,21 +254,90 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi
return
}
-// Wait for `*nextSendTime` and update it afterwards.
-func schedule(tps uint64, nextSendTime *time.Time) {
- dur := time.Until(*nextSendTime)
- if dur > 0 {
- time.Sleep(dur)
+func (pps *WorkerState) scheduleAction() bool {
+ if pps.refreshPos >= len(pps.refreshAddrs) {
+ if pps.refreshAddrs == nil {
+ pps.refreshAddrs = make([]string, 0, len(pps.accounts))
+ } else {
+ pps.refreshAddrs = pps.refreshAddrs[:0]
+ }
+ for addr := range pps.accounts {
+ pps.refreshAddrs = append(pps.refreshAddrs, addr)
+ }
+ pps.refreshPos = 0
}
+ addr := pps.refreshAddrs[pps.refreshPos]
+ ai, err := pps.client.AccountInformation(addr)
+ if err == nil {
+ ppa := pps.accounts[addr]
- *nextSendTime = nextSendTime.Add(time.Second / time.Duration(tps))
+ pps.integrateAccountInfo(addr, ppa, ai)
+ } else {
+ if !pps.cfg.Quiet {
+ fmt.Printf("background refresh err: %v\n", err)
+ }
+ return false
+ }
+ pps.refreshPos++
+ return true
+}
+
+const durationEpsilon = time.Microsecond * 10
+const scheduleActionTimeAlpha = 6
+
+// schedule consuming n txn time slots
+func (pps *WorkerState) schedule(n int) {
+ pps.scheduleCalls++
+ now := time.Now()
+ ok := true
+ timePerStep := time.Second / time.Duration(pps.cfg.TxnPerSec)
+ nextSendTime := pps.nextSendTime
+ if n > 1 {
+ nextSendTime = nextSendTime.Add(timePerStep * time.Duration(n-1))
+ }
+ for {
+ if now.After(nextSendTime) {
+ break
+ }
+ dur := nextSendTime.Sub(now)
+ if dur < durationEpsilon {
+ break
+ }
+ if dur < pps.scheduleActionTime || !ok {
+ time.Sleep(dur)
+ now = time.Now()
+ } else {
+ ok = pps.scheduleAction()
+ nn := time.Now()
+ dt := nn.Sub(now)
+ // alpha blend to keep running approximation
+ pps.scheduleActionTime = ((pps.scheduleActionTime * scheduleActionTimeAlpha) + dt) / (scheduleActionTimeAlpha + 1)
+ now = nn
+ }
+ }
+
+ steps := 0
+ for now.After(nextSendTime) {
+ if steps > 0 {
+ dt := now.Sub(nextSendTime)
+ if dt < timePerStep/2 {
+ // good enough
+ break
+ }
+ }
+ pps.scheduleSteps++
+ nextSendTime = nextSendTime.Add(timePerStep)
+ steps++
+ }
+ pps.nextSendTime = nextSendTime
+ //fmt.Printf("schedule now=%s next=%s\n", now, pps.nextSendTime)
}
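
Note: the alpha blend is an exponential moving average: with scheduleActionTimeAlpha = 6, each new sample contributes 1/7 of the running estimate of how long one scheduleAction call takes. Isolated as a sketch:

    // EMA update used above: alpha-weight the old value, add the new sample
    func emaUpdate(prev, sample time.Duration) time.Duration {
        return (prev*scheduleActionTimeAlpha + sample) / (scheduleActionTimeAlpha + 1)
    }
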
-func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error {
+func (pps *WorkerState) fundAccounts(client *libgoal.Client) error {
var srcFunds, minFund uint64
var err error
var tx transactions.Transaction
- srcFunds, err = client.GetBalance(cfg.SrcAccount)
+ srcFunds, err = client.GetBalance(pps.cfg.SrcAccount)
if err != nil {
return err
@@ -288,19 +348,19 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
 	// Fee of 0 will cause the function to use the fee suggested by the network
fee := uint64(0)
- minFund, _, err = computeAccountMinBalance(client, cfg)
+ minFund, _, err = computeAccountMinBalance(client, pps.cfg)
if err != nil {
return err
}
fmt.Printf("adjusting account balance to %d\n", minFund)
- srcAcct := accounts[cfg.SrcAccount]
+ srcAcct := pps.accounts[pps.cfg.SrcAccount]
- nextSendTime := time.Now()
- for {
- accountsAdjusted := 0
+ accountsAdjusted := 1
+ for accountsAdjusted > 0 {
+ accountsAdjusted = 0
adjStart := time.Now()
- for addr, acct := range accounts {
+ for addr, acct := range pps.accounts {
if addr == pps.cfg.SrcAccount {
continue
}
@@ -308,19 +368,19 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
if acct.getBalance() >= minFund {
continue
}
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("adjusting balance of account %v\n", addr)
}
toSend := minFund - acct.getBalance()
if srcFunds <= toSend {
- return fmt.Errorf("source account %s has insufficient funds %d - needs %d", cfg.SrcAccount, srcFunds, toSend)
+ return fmt.Errorf("source account %s has insufficient funds %d - needs %d", pps.cfg.SrcAccount, srcFunds, toSend)
}
srcFunds -= toSend
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
 				fmt.Printf("adjusting balance of account %v by %d\n", addr, toSend)
}
- schedule(cfg.TxnPerSec, &nextSendTime)
+ pps.schedule(1)
tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
if err != nil {
if strings.Contains(err.Error(), "broadcast queue full") {
@@ -332,32 +392,29 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien
}
srcFunds -= tx.Fee.Raw
accountsAdjusted++
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("account balance for key %s will be %d\n", addr, minFund)
}
acct.setBalance(minFund)
totalSent++
}
- accounts[cfg.SrcAccount].setBalance(srcFunds)
+ pps.accounts[pps.cfg.SrcAccount].setBalance(srcFunds)
waitStart := time.Now()
// wait until all the above transactions are sent, or that we have no more transactions
// in our pending transaction pool coming from the source account.
- err = waitPendingTransactions([]string{cfg.SrcAccount}, client)
+ err = waitPendingTransactions([]string{pps.cfg.SrcAccount}, client)
if err != nil {
return err
}
waitStop := time.Now()
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
fmt.Printf("%d sent (%s); waited %s\n", accountsAdjusted, waitStart.Sub(adjStart).String(), waitStop.Sub(waitStart).String())
}
- if accountsAdjusted == 0 {
- break
- }
}
return err
}
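
Note: fundAccounts seeds accountsAdjusted with 1 because Go has no do-while loop; reduced to a sketch, its shape is:

    for adjusted := 1; adjusted > 0; {
        adjusted = 0
        // top up every account below minFund, incrementing adjusted per send,
        // then wait for the pending transactions to clear
    }
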
-func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to string, fee, amount uint64, srcAcct *pingPongAccount) (transactions.Transaction, error) {
+func (pps *WorkerState) sendPaymentFromSourceAccount(client *libgoal.Client, to string, fee, amount uint64, srcAcct *pingPongAccount) (transactions.Transaction, error) {
// generate a unique note to avoid duplicate transaction failures
note := pps.makeNextUniqueNoteField()
@@ -388,7 +445,7 @@ func (pps *WorkerState) sendPaymentFromSourceAccount(client libgoal.Client, to s
 // accounts map has been cleared out of the transaction pool. A prerequisite for this is that
 // there is no other source that might be generating transactions coming from these account
// addresses.
-func waitPendingTransactions(accounts []string, client libgoal.Client) error {
+func waitPendingTransactions(accounts []string, client *libgoal.Client) error {
for _, from := range accounts {
repeat:
pendingTxns, err := client.GetPendingTransactionsByAddress(from, 0)
@@ -411,13 +468,11 @@ func waitPendingTransactions(accounts []string, client libgoal.Client) error {
return nil
}
-func (pps *WorkerState) refreshAccounts(client libgoal.Client, cfg PpConfig) error {
- pps.accountsMu.Lock()
+func (pps *WorkerState) refreshAccounts(client *libgoal.Client) error {
addrs := make([]string, 0, len(pps.accounts))
for addr := range pps.accounts {
addrs = append(addrs, addr)
}
- pps.accountsMu.Unlock()
// wait until all the pending transactions have been sent; otherwise, getting the balance
// is pretty much meaningless.
fmt.Printf("waiting for all transactions to be accepted before refreshing accounts.\n")
@@ -436,13 +491,11 @@ func (pps *WorkerState) refreshAccounts(client libgoal.Client, cfg PpConfig) err
balanceUpdates[addr] = amount
}
- pps.accountsMu.Lock()
- defer pps.accountsMu.Unlock()
for addr, amount := range balanceUpdates {
pps.accounts[addr].setBalance(amount)
}
- return pps.fundAccounts(pps.accounts, client, cfg)
+ return pps.fundAccounts(client)
}
// return a shuffled list of accounts with some minimum balance
@@ -463,7 +516,7 @@ func listSufficientAccounts(accounts map[string]*pingPongAccount, minimumAmount
var logPeriod = 5 * time.Second
// RunPingPong starts ping pong process
-func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
+func (pps *WorkerState) RunPingPong(ctx context.Context, ac *libgoal.Client) {
// Infinite loop given:
// - accounts -> map of accounts to include in transfers (including src account, which we don't want to use)
// - cfg -> configuration for how to proceed
@@ -480,23 +533,21 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
// error = fundAccounts()
// }
- cfg := pps.cfg
+ pps.nextSendTime = time.Now()
+ ac.SetSuggestedParamsCacheAge(200 * time.Millisecond)
+ pps.client = ac
+
var runTime time.Duration
- if cfg.RunTime > 0 {
- runTime = cfg.RunTime
+ if pps.cfg.RunTime > 0 {
+ runTime = pps.cfg.RunTime
} else {
runTime = 10000 * time.Hour // Effectively 'forever'
}
var endTime time.Time
- if cfg.MaxRuntime > 0 {
- endTime = time.Now().Add(cfg.MaxRuntime)
- }
- refreshTime := time.Now().Add(cfg.RefreshTime)
-
- var nftThrottler *throttler
- if pps.cfg.NftAsaPerSecond > 0 {
- nftThrottler = newThrottler(20, float64(pps.cfg.NftAsaPerSecond))
+ if pps.cfg.MaxRuntime > 0 {
+ endTime = time.Now().Add(pps.cfg.MaxRuntime)
}
+ refreshTime := time.Now().Add(pps.cfg.RefreshTime)
lastLog := time.Now()
nextLog := lastLog.Add(logPeriod)
@@ -518,7 +569,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
}
if now.After(nextLog) {
dt := now.Sub(lastLog)
- fmt.Printf("%d sent, %0.2f/s (%d total)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent)
+ fmt.Printf("%d sent, %0.2f/s (%d total) (%d sc %d sts)\n", totalSent-lastTotalSent, float64(totalSent-lastTotalSent)/dt.Seconds(), totalSent, pps.scheduleCalls, pps.scheduleSteps)
lastTotalSent = totalSent
for now.After(nextLog) {
nextLog = nextLog.Add(logPeriod)
@@ -526,32 +577,18 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
lastLog = now
}
- if cfg.MaxRuntime > 0 && time.Now().After(endTime) {
- fmt.Printf("Terminating after max run time of %.f seconds\n", cfg.MaxRuntime.Seconds())
+ if pps.cfg.MaxRuntime > 0 && time.Now().After(endTime) {
+ fmt.Printf("Terminating after max run time of %.f seconds\n", pps.cfg.MaxRuntime.Seconds())
return
}
- if pps.cfg.NftAsaPerSecond > 0 {
- sent, err := pps.makeNftTraffic(ac)
- if err != nil {
- fmt.Fprintf(os.Stderr, "error sending nft transactions: %v\n", err)
- }
- nftThrottler.maybeSleep(int(sent))
- totalSent += sent
- continue
- }
-
- minimumAmount := cfg.MinAccountFunds + (cfg.MaxAmt+cfg.MaxFee)*2
- pps.accountsMu.RLock()
- fromList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- pps.accountsMu.RUnlock()
+ minimumAmount := pps.cfg.MinAccountFunds + (pps.cfg.MaxAmt+pps.cfg.MaxFee)*2
+ fromList := listSufficientAccounts(pps.accounts, minimumAmount, pps.cfg.SrcAccount)
// in group tests txns are sent back and forth, so both parties need funds
var toList []string
- if cfg.GroupSize == 1 {
+ if pps.cfg.GroupSize == 1 {
minimumAmount = 0
- pps.accountsMu.RLock()
- toList = listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount)
- pps.accountsMu.RUnlock()
+ toList = listSufficientAccounts(pps.accounts, minimumAmount, pps.cfg.SrcAccount)
} else {
// same selection with another shuffle
toList = make([]string, len(fromList))
@@ -563,16 +600,18 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) {
totalSent += sent
totalSucceeded += succeeded
if err != nil {
- _, _ = fmt.Fprintf(os.Stderr, "error sending transactions: %v\n", err)
+ _, _ = fmt.Fprintf(os.Stderr, "error sending transactions, sleeping .5 seconds: %v\n", err)
+ pps.nextSendTime = time.Now().Add(500 * time.Millisecond)
+ pps.schedule(1)
}
- if cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
- err = pps.refreshAccounts(ac, cfg)
+ if pps.cfg.RefreshTime > 0 && time.Now().After(refreshTime) {
+ err = pps.refreshAccounts(ac)
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "error refreshing: %v\n", err)
}
- refreshTime = refreshTime.Add(cfg.RefreshTime)
+ refreshTime = refreshTime.Add(pps.cfg.RefreshTime)
}
}
@@ -586,157 +625,58 @@ func NewPingpong(cfg PpConfig) *WorkerState {
return &WorkerState{cfg: cfg, nftHolders: make(map[string]int)}
}
-func randomizeCreatableID(cfg PpConfig, cinfo CreatablesInfo) (aidx uint64) {
- if cfg.NumAsset > 0 {
- rindex := rand.Intn(len(cinfo.AssetParams))
- i := 0
- for k := range cinfo.AssetParams {
- if i == rindex {
- aidx = k
- break
- }
- i++
- }
- } else if cfg.NumApp > 0 {
- rindex := rand.Intn(len(cinfo.AppParams))
- i := 0
- for k := range cinfo.AppParams {
- if i == rindex {
- aidx = k
- break
- }
- i++
+func (pps *WorkerState) randAssetID() (aidx uint64) {
+ if len(pps.cinfo.AssetParams) == 0 {
+ return 0
+ }
+ rindex := rand.Intn(len(pps.cinfo.AssetParams))
+ i := 0
+ for k := range pps.cinfo.AssetParams {
+ if i == rindex {
+ return k
}
+ i++
}
return
}
-
-func (pps *WorkerState) fee() uint64 {
- cfg := pps.cfg
- fee := cfg.MaxFee
- if cfg.RandomizeFee {
- fee = rand.Uint64()%(cfg.MaxFee-cfg.MinFee) + cfg.MinFee
+func (pps *WorkerState) randAppID() (aidx uint64) {
+ if len(pps.cinfo.AppParams) == 0 {
+ return 0
}
- return fee
-}
-
-func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64, err error) {
- fee := pps.fee()
- var srcCost uint64
- if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
- var addr string
-
- var seed [32]byte
- crypto.RandBytes(seed[:])
- privateKey := crypto.GenerateSignatureSecrets(seed)
- publicKey := basics.Address(privateKey.SignatureVerifier)
-
- pps.accountsMu.Lock()
- pps.accounts[publicKey.String()] = &pingPongAccount{
- sk: privateKey,
- pk: publicKey,
- }
- pps.accountsMu.Unlock()
- addr = publicKey.String()
-
- fmt.Printf("new NFT holder %s\n", addr)
- var proto config.ConsensusParams
- proto, err = getProto(client)
- if err != nil {
- return
- }
- // enough for the per-asa minbalance and more than enough for the txns to create them
- toSend := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
- pps.nftHolders[addr] = 0
- var tx transactions.Transaction
- srcAcct := pps.acct(pps.cfg.SrcAccount)
- tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend, srcAcct)
- if err != nil {
- return
+ rindex := rand.Intn(len(pps.cinfo.AppParams))
+ i := 0
+ for k := range pps.cinfo.AppParams {
+ if i == rindex {
+ return k
}
- srcCost += tx.Fee.Raw + toSend
- sentCount++
- // we ran one txn above already to fund the new addr,
- // we'll run a second txn below
- }
- pps.accountsMu.Lock()
- pps.accounts[pps.cfg.SrcAccount].addBalance(-int64(srcCost))
- pps.accountsMu.Unlock()
- // pick a random sender from nft holder sub accounts
- pick := rand.Intn(len(pps.nftHolders))
- pos := 0
- var sender string
- var senderNftCount int
- for addr, nftCount := range pps.nftHolders {
- sender = addr
- senderNftCount = nftCount
- if pos == pick {
- break
- }
- pos++
-
- }
- var meta [32]byte
- rand.Read(meta[:])
- assetName := pps.nftSpamAssetName()
- const totalSupply = 1
- txn, err := client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0)
- if err != nil {
- fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
- return
- }
- txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, pps.cfg.MaxFee, txn)
- if err != nil {
- fmt.Printf("Cannot fill asset creation txn\n")
- return
- }
- if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) {
- delete(pps.nftHolders, sender)
- } else {
- pps.nftHolders[sender] = senderNftCount + 1
- }
- signer := pps.acct(sender)
- stxn, err := signTxn(signer, txn, pps.cfg)
- if err != nil {
- return
+ i++
}
+ return
+}
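
Note: randAssetID and randAppID repeat the same uniform pick. Go deliberately randomizes map iteration order, but the spec only says the order is unspecified, not uniformly random, so both functions index to a random position for an unbiased sample. With generics (Go 1.18+) the two could share a single hypothetical helper:

    func randKey[K comparable, V any](m map[K]V) (k K) {
        if len(m) == 0 {
            return // zero value, like the 0 returned above
        }
        rindex := rand.Intn(len(m))
        i := 0
        for key := range m {
            if i == rindex {
                return key
            }
            i++
        }
        return
    }
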
- _, err = client.BroadcastTransaction(stxn)
- if err != nil {
- return
+func (pps *WorkerState) fee() uint64 {
+ fee := pps.cfg.MaxFee
+ if pps.cfg.RandomizeFee {
+ fee = rand.Uint64()%(pps.cfg.MaxFee-pps.cfg.MinFee) + pps.cfg.MinFee
}
- sentCount++
- return
+ return fee
}
func (pps *WorkerState) acct(from string) *pingPongAccount {
- pps.accountsMu.RLock()
- defer pps.accountsMu.RUnlock()
return pps.accounts[from]
}
func (pps *WorkerState) sendFromTo(
fromList, toList []string,
- client libgoal.Client, nextSendTime *time.Time,
+ client *libgoal.Client, nextSendTime *time.Time,
) (sentCount, successCount uint64, err error) {
- cinfo := pps.cinfo
- cfg := pps.cfg
-
- amt := cfg.MaxAmt
var minAccountRunningBalance uint64
- _, minAccountRunningBalance, err = computeAccountMinBalance(client, cfg)
+ _, minAccountRunningBalance, err = computeAccountMinBalance(client, pps.cfg)
if err != nil {
return 0, 0, err
}
belowMinBalanceAccounts := make(map[string] /*basics.Address*/ bool)
- assetsByCreator := make(map[string][]*v1.AssetParams)
- for _, p := range cinfo.AssetParams {
- c := p.Creator
- ap := &v1.AssetParams{}
- *ap = p
- assetsByCreator[c] = append(assetsByCreator[c], ap)
- }
for i, from := range fromList {
// keep going until the balances of at least 20% of the accounts is too low.
@@ -749,14 +689,10 @@ func (pps *WorkerState) sendFromTo(
continue
}
- if cfg.RandomizeAmt {
- amt = ((rand.Uint64() % cfg.MaxAmt) + 1) % cfg.MaxAmt
- }
-
fee := pps.fee()
to := toList[i]
- if cfg.RandomizeDst {
+ if pps.cfg.RandomizeDst {
var addr basics.Address
crypto.RandBytes(addr[:])
to = addr.String()
@@ -772,22 +708,15 @@ func (pps *WorkerState) sendFromTo(
// Broadcast transaction
var sendErr error
- fromBalanceChange := int64(0)
- toBalanceChange := int64(0)
- if cfg.NumAsset > 0 {
- amt = 1
- } else if cfg.NumApp > 0 {
- amt = 0
- }
- fromAcct := pps.acct(from)
- if cfg.GroupSize == 1 {
- // generate random assetID or appId if we send asset/app txns
- aidx := randomizeCreatableID(cfg, cinfo)
+ var fromAcct *pingPongAccount
+ var update txnUpdate
+ var updates []txnUpdate
+ if pps.cfg.GroupSize == 1 {
var txn transactions.Transaction
var consErr error
// Construct single txn
- txn, from, consErr = pps.constructTxn(from, to, fee, amt, aidx, client)
+ txn, from, update, consErr = pps.constructTxn(from, to, fee, client)
if consErr != nil {
err = consErr
_, _ = fmt.Fprintf(os.Stderr, "constructTxn failed: %v\n", err)
@@ -795,26 +724,29 @@ func (pps *WorkerState) sendFromTo(
}
		// would we have enough money after taking the current updated fees into account?
- if fromAcct.getBalance() <= (txn.Fee.Raw + amt + minAccountRunningBalance) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", amt, from, to, fromAcct.getBalance(), txn.Fee.Raw, amt, minAccountRunningBalance)
+ fromAcct = pps.acct(from)
+ if fromAcct == nil {
+ err = fmt.Errorf("tx %v from %s -> no acct", txn, from)
+ fmt.Fprintf(os.Stderr, "%s\n", err.Error())
+ return
+ }
+
+ if fromAcct.getBalance() <= (txn.Fee.Raw + pps.cfg.MaxAmt + minAccountRunningBalance) {
+ _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d: %s -> %s; Current cost too high(%d <= %d + %d + %d).\n", pps.cfg.MaxAmt, from, to, fromAcct.getBalance(), txn.Fee.Raw, pps.cfg.MaxAmt, minAccountRunningBalance)
belowMinBalanceAccounts[from] = true
continue
}
- fromBalanceChange = -int64(txn.Fee.Raw + amt)
- toBalanceChange = int64(amt)
-
// Sign txn
- signer := pps.acct(from)
- stxn, signErr := signTxn(signer, txn, cfg)
+ stxn, signErr := signTxn(fromAcct, txn, pps.cfg)
if signErr != nil {
err = signErr
_, _ = fmt.Fprintf(os.Stderr, "signTxn failed: %v\n", err)
return
}
- schedule(cfg.TxnPerSec, nextSendTime)
sentCount++
+ pps.schedule(1)
_, sendErr = client.BroadcastTransaction(stxn)
} else {
// Generate txn group
@@ -826,31 +758,22 @@ func (pps *WorkerState) sendFromTo(
var txGroup []transactions.Transaction
var txSigners []string
- for j := 0; j < int(cfg.GroupSize); j++ {
+ for j := 0; j < int(pps.cfg.GroupSize); j++ {
var txn transactions.Transaction
var signer string
if j%2 == 0 {
- txn, signer, err = pps.constructTxn(from, to, fee, amt, 0, client)
- fromBalanceChange -= int64(txn.Fee.Raw + amt)
- toBalanceChange += int64(amt)
- } else if cfg.GroupSize == 2 && cfg.Rekey {
- txn, _, err = pps.constructTxn(from, to, fee, amt, 0, client)
- fromBalanceChange -= int64(txn.Fee.Raw + amt)
- toBalanceChange += int64(amt)
+ txn, signer, update, err = pps.constructTxn(from, to, fee, client)
+ } else if pps.cfg.GroupSize == 2 && pps.cfg.Rekey {
+ txn, _, update, err = pps.constructTxn(from, to, fee, client)
signer = to
} else {
- txn, signer, err = pps.constructTxn(to, from, fee, amt, 0, client)
- toBalanceChange -= int64(txn.Fee.Raw + amt)
- fromBalanceChange += int64(amt)
+ txn, signer, update, err = pps.constructTxn(to, from, fee, client)
}
if err != nil {
_, _ = fmt.Fprintf(os.Stderr, "group tx failed: %v\n", err)
return
}
- if cfg.RandomizeAmt && j%2 == 1 {
- amt = rand.Uint64()%cfg.MaxAmt + 1
- }
- if cfg.Rekey {
+ if pps.cfg.Rekey {
if from == signer {
// rekey to the receiver the first txn of the rekeying pair
txn.RekeyTo, err = basics.UnmarshalChecksumAddress(to)
@@ -865,17 +788,7 @@ func (pps *WorkerState) sendFromTo(
}
txGroup = append(txGroup, txn)
txSigners = append(txSigners, signer)
- }
-
- // would we have enough money after taking into account the current updated fees ?
- if int64(fromAcct.getBalance())+fromBalanceChange <= int64(cfg.MinAccountFunds) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending %d : %s -> %s; Current cost too high.\n", amt, from, to)
- continue
- }
- toAcct := pps.acct(to)
- if int64(toAcct.getBalance())+toBalanceChange <= int64(cfg.MinAccountFunds) {
- _, _ = fmt.Fprintf(os.Stdout, "Skipping sending back %d : %s -> %s; Current cost too high.\n", amt, to, from)
- continue
+ updates = append(updates, update)
}
// Generate group ID
@@ -885,7 +798,7 @@ func (pps *WorkerState) sendFromTo(
return
}
- if !cfg.Quiet {
+ if !pps.cfg.Quiet {
_, _ = fmt.Fprintf(os.Stdout, "Sending TxnGroup: ID %v, size %v \n", gid, len(txGroup))
}
@@ -895,29 +808,34 @@ func (pps *WorkerState) sendFromTo(
for j, txn := range txGroup {
txn.Group = gid
signer := pps.acct(txSigners[j])
- stxGroup[j], signErr = signTxn(signer, txn, cfg)
+ stxGroup[j], signErr = signTxn(signer, txn, pps.cfg)
if signErr != nil {
err = signErr
return
}
}
- schedule(cfg.TxnPerSec, nextSendTime)
- sentCount++
+ sentCount += uint64(len(txGroup))
+ pps.schedule(len(txGroup))
sendErr = client.BroadcastTransactionGroup(stxGroup)
}
if sendErr != nil {
- _, _ = fmt.Fprintf(os.Stderr, "error sending Transaction, sleeping .5 seconds: %v\n", sendErr)
err = sendErr
- time.Sleep(500 * time.Millisecond)
return
}
- successCount++
- fromAcct.addBalance(fromBalanceChange)
- // avoid updating the "to" account.
+		// assume that if it was accepted by an algod, it got processed
+		// (this is a bad assumption; we should be checking pending status or reading blocks to see whether our txids were committed)
+ if len(updates) > 0 {
+ for _, ud := range updates {
+ ud.apply(pps)
+ }
+ } else if update != nil {
+ update.apply(pps)
+ }
+ successCount++
}
return
}
@@ -936,74 +854,14 @@ func (pps *WorkerState) makeNextUniqueNoteField() []byte {
return noteField[:usedBytes]
}
-func (pps *WorkerState) roundMonitor(client libgoal.Client) {
- var minFund uint64
- var err error
- for {
- minFund, _, err = computeAccountMinBalance(client, pps.cfg)
- if err == nil {
- break
- }
- }
- var newBalance uint64
- for {
- paramsResp, err := client.SuggestedParams()
- if err != nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- pendingTxns, err := client.GetPendingTransactions(0)
- if err != nil {
- time.Sleep(5 * time.Millisecond)
- continue
- }
- pps.muSuggestedParams.Lock()
- pps.suggestedParams = paramsResp
- pps.pendingTxns = pendingTxns
- pps.muSuggestedParams.Unlock()
-
- // take a quick snapshot of accounts to decrease mutex shadow
- pps.accountsMu.Lock()
- accountsSnapshot := make([]*pingPongAccount, 0, len(pps.accounts))
- for _, acct := range pps.accounts {
- accountsSnapshot = append(accountsSnapshot, acct)
- }
- pps.accountsMu.Unlock()
-
- for _, acct := range accountsSnapshot {
- acct.Lock()
- needRefresh := acct.balance < minFund && acct.balanceRound < paramsResp.LastRound
- acct.Unlock()
- if needRefresh {
- newBalance, err = client.GetBalance(acct.pk.String())
- if err == nil {
- acct.Lock()
- acct.balanceRound, acct.balance = paramsResp.LastRound, newBalance
- acct.Unlock()
- }
- }
- }
-
- // wait for the next round.
- waitForNextRoundOrSleep(client, 200*time.Millisecond)
- }
-}
-
-func (pps *WorkerState) getSuggestedParams() v1.TransactionParams {
- pps.muSuggestedParams.Lock()
- defer pps.muSuggestedParams.Unlock()
- return pps.suggestedParams
-}
+var errNotOptedIn = errors.New("not opted in")
-func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, client libgoal.Client) (txn transactions.Transaction, sender string, err error) {
- cfg := pps.cfg
- cinfo := pps.cinfo
- sender = from
+func (pps *WorkerState) constructTxn(from, to string, fee uint64, client *libgoal.Client) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
var noteField []byte
const pingpongTag = "pingpong"
const tagLen = len(pingpongTag)
// if random note flag set, then append a random number of additional bytes
- if cfg.RandomNote {
+ if pps.cfg.RandomNote {
const maxNoteFieldLen = 1024
noteLength := tagLen + int(rand.Uint32())%(maxNoteFieldLen-tagLen)
noteField = make([]byte, noteLength)
@@ -1015,83 +873,38 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
// if random lease flag set, fill the lease field with random bytes
var lease [32]byte
- if cfg.RandomLease {
+ if pps.cfg.RandomLease {
crypto.RandBytes(lease[:])
}
- if cfg.NumApp > 0 { // Construct app transaction
- // select opted-in accounts for Txn.Accounts field
- var accounts []string
- assetOptIns := cinfo.OptIns[aidx]
- if len(assetOptIns) > 0 {
- indices := rand.Perm(len(assetOptIns))
- limit := 5
- if len(indices) < limit {
- limit = len(indices)
- }
- for i := 0; i < limit; i++ {
- idx := indices[i]
- accounts = append(accounts, assetOptIns[idx])
- }
- if cinfo.AssetParams[aidx].Creator == from {
- // if the application was created by the "from" account, then we don't need to worry about it being opted-in.
- } else {
- fromIsOptedIn := false
- for i := 0; i < len(assetOptIns); i++ {
- if assetOptIns[i] == from {
- fromIsOptedIn = true
- break
- }
- }
- if !fromIsOptedIn {
- sender = accounts[0]
- from = sender
- }
- }
- accounts = accounts[1:]
- }
- txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
- if err != nil {
- return
- }
- txn.Note = noteField[:]
- txn.Lease = lease
- txn, err = client.FillUnsignedTxTemplate(from, 0, 0, cfg.MaxFee, txn)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Calling app %d : %s\n", aidx, from)
- }
- } else if cfg.NumAsset > 0 { // Construct asset transaction
- // select a pair of random opted-in accounts by aidx
- // use them as from/to addresses
- if from != to {
- if len(cinfo.OptIns[aidx]) > 0 {
- indices := rand.Perm(len(cinfo.OptIns[aidx]))
- from = cinfo.OptIns[aidx][indices[0]]
- to = cinfo.OptIns[aidx][indices[1]]
- sender = from
- } else {
- err = fmt.Errorf("asset %d has not been opted in by any account", aidx)
- _, _ = fmt.Fprintf(os.Stdout, "error constructing transaction - %v\n", err)
- return
- }
- }
- txn, err = client.MakeUnsignedAssetSendTx(aidx, amt, to, "", "")
- if err != nil {
- _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", err)
- return
+ // weighted random selection of traffic type
+	// TODO: the construct*Txn() helpers share a signature; turn this into a table of weighted constructors and loop over it?
+	totalWeight := pps.cfg.WeightPayment + pps.cfg.WeightAsset + pps.cfg.WeightApp + pps.cfg.WeightNFTCreation
+ target := rand.Float64() * totalWeight
+ if target < pps.cfg.WeightAsset && pps.cfg.NumAsset > 0 {
+ txn, sender, update, err = pps.constructAssetTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
- txn.Note = noteField[:]
- txn.Lease = lease
- txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, cfg.MaxFee, txn)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Sending %d asset %d: %s -> %s\n", amt, aidx, sender, to)
+ }
+ target -= pps.cfg.WeightAsset
+ if target < pps.cfg.WeightApp && pps.cfg.NumApp > 0 {
+ txn, sender, update, err = pps.constructAppTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
- } else {
- txn, err = pps.constructPayment(from, to, fee, amt, noteField, "", lease)
- if !cfg.Quiet {
- _, _ = fmt.Fprintf(os.Stdout, "Sending %d : %s -> %s\n", amt, from, to)
+ }
+ target -= pps.cfg.WeightApp
+ if target < pps.cfg.WeightNFTCreation && pps.cfg.NftAsaPerSecond > 0 {
+ txn, sender, update, err = pps.constructNFTGenTxn(from, to, fee, client, noteField, lease)
+ if err != errNotOptedIn {
+ goto weightdone
}
}
+ // TODO: other traffic types here
+	// fall back on payment
+ txn, sender, update, err = pps.constructPaymentTxn(from, to, fee, client, noteField, lease)
+weightdone:
if err != nil {
_, _ = fmt.Fprintf(os.Stdout, "error constructing transaction %v\n", err)
@@ -1100,8 +913,8 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
	// limit the transaction's validity window to 5 rounds so it does not sit in the transaction pool for too long.
txn.LastValid = txn.FirstValid + 5
- // if cfg.MaxFee == 0, automatically adjust the fee amount to required min fee
- if cfg.MaxFee == 0 {
+ // if pps.cfg.MaxFee == 0, automatically adjust the fee amount to required min fee
+ if pps.cfg.MaxFee == 0 {
var suggestedFee uint64
suggestedFee, err = client.SuggestedFee()
if err != nil {
@@ -1115,85 +928,313 @@ func (pps *WorkerState) constructTxn(from, to string, fee, amt, aidx uint64, cli
return
}
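
The dispatch above draws one traffic type by comparing a random target against each weight and subtracting as it goes, with payment as the fallback. The TODO hints at a table-driven version; a minimal sketch of that shape, with hypothetical names and a string standing in for the constructed transaction:

```go
import "math/rand"

// weightedOption pairs a traffic weight with its constructor, the table
// shape the TODO above suggests.
type weightedOption struct {
	weight float64
	build  func() string // stand-in for a construct*Txn helper
}

// pickWeighted selects an option with probability proportional to its
// weight, falling back on the last entry (payment, in the diff) when
// earlier branches decline or rounding leaves the target positive.
func pickWeighted(options []weightedOption) string {
	total := 0.0
	for _, o := range options {
		total += o.weight
	}
	target := rand.Float64() * total
	for _, o := range options {
		if target < o.weight {
			return o.build()
		}
		target -= o.weight
	}
	return options[len(options)-1].build()
}
```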
-// ConstructPayment builds a payment transaction to be signed
-// If the fee is 0, the function will use the suggested one form the network
-// Although firstValid and lastValid come pre-computed in a normal flow,
-// additional validation is done by computeValidityRounds:
-// if the lastValid is 0, firstValid + maxTxnLifetime will be used
-// if the firstValid is 0, lastRound + 1 will be used
-func (pps *WorkerState) constructPayment(from, to string, fee, amount uint64, note []byte, closeTo string, lease [32]byte) (transactions.Transaction, error) {
- fromAddr, err := basics.UnmarshalChecksumAddress(from)
- if err != nil {
- return transactions.Transaction{}, err
+type txnUpdate interface {
+ apply(pps *WorkerState)
+}
+
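The txnUpdate interface captures, at construction time, the balance effects a transaction should have, and defers applying them until after broadcast succeeds, per the caveat in sendFromTo above. A toy model of the pattern with simplified types (not the real WorkerState):

```go
// ledger is a stand-in for the account bookkeeping in WorkerState.
type ledger struct{ balances map[string]uint64 }

type update interface{ apply(l *ledger) }

// payment mirrors the paymentUpdate record defined below.
type payment struct {
	from, to string
	amt, fee uint64
}

func (p payment) apply(l *ledger) {
	l.balances[p.from] -= p.amt + p.fee
	l.balances[p.to] += p.amt
}

// send applies the pending update only on successful broadcast, so a
// rejected transaction leaves the local bookkeeping untouched.
func send(l *ledger, u update, broadcast func() error) error {
	if err := broadcast(); err != nil {
		return err
	}
	u.apply(l) // optimistic: acceptance is assumed to imply commitment
	return nil
}
```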
+func (pps *WorkerState) constructPaymentTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ amt := pps.cfg.MaxAmt
+	if pps.cfg.RandomizeAmt && pps.cfg.MaxAmt > 1 {
+		// rand.Int63n panics when its argument is <= 0, so only randomize for MaxAmt > 1
+		amt = uint64(rand.Int63n(int64(pps.cfg.MaxAmt-1))) + 1
+ }
+ txn, err = client.ConstructPayment(from, to, fee, amt, noteField, "", lease, 0, 0)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Sending %d : %s -> %s\n", amt, from, to)
+ }
+ update = &paymentUpdate{
+ from: from,
+ to: to,
+ amt: amt,
+ fee: fee,
}
+ return txn, from, update, err
+}
+
+type paymentUpdate struct {
+ from string
+ to string
+ amt uint64
+ fee uint64
+}
- var toAddr basics.Address
- if to != "" {
- toAddr, err = basics.UnmarshalChecksumAddress(to)
+func (au *paymentUpdate) apply(pps *WorkerState) {
+	pps.accounts[au.from].balance -= (au.fee + au.amt)
+	// the destination may be a random address we don't track (cfg.RandomizeDst)
+	if to, ok := pps.accounts[au.to]; ok {
+		to.balance += au.amt
+	}
+}
+
+// return true with probability 1/i
+func pReplace(i int) bool {
+ if i <= 1 {
+ return true
+ }
+ return rand.Intn(i) == 0
+}
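
pReplace is the acceptance rule from single-element reservoir sampling: replacing the current candidate at 1-indexed step i with probability 1/i yields a uniform choice over a stream without knowing its length up front. (The selection loop below applies it only to indices that pass other filters, so uniformity there is approximate.) A standalone sketch:

```go
import "math/rand"

// reservoirPick returns a uniformly random element of items using the same
// replacement rule as pReplace: keep item i (0-indexed) with probability
// 1/(i+1). The first item is always taken, so empty input yields "".
func reservoirPick(items []string) string {
	var chosen string
	for i, item := range items {
		if rand.Intn(i+1) == 0 {
			chosen = item
		}
	}
	return chosen
}
```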
+
+func (pps *WorkerState) constructAssetTxn(from, toUnused string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+	// pick sender and receiver among the accounts opted in to this asset,
+	// preferring a well-funded sender and topping up poorly funded receivers
+ amt := uint64(1)
+ aidx := pps.randAssetID()
+ if aidx == 0 {
+ err = fmt.Errorf("no known assets")
+ return
+ }
+ if len(pps.cinfo.OptIns[aidx]) == 0 {
+ // Opt-in another
+ // TODO: continue opt-in up to some amount? gradually?
+ txn, err = pps.appOptIn(from, aidx, client)
if err != nil {
- return transactions.Transaction{}, err
+ return
+ }
+ update = &appOptInUpdate{
+ addr: from,
+ aidx: aidx,
}
+ return txn, from, update, nil
}
- // Get current round, protocol, genesis ID
- var params v1.TransactionParams
- for params.LastRound == 0 {
- params = pps.getSuggestedParams()
- }
-
- cp, ok := config.Consensus[protocol.ConsensusVersion(params.ConsensusVersion)]
- if !ok {
- return transactions.Transaction{}, fmt.Errorf("ConstructPayment: unknown consensus protocol %s", params.ConsensusVersion)
- }
- fv := params.LastRound + 1
- lv := fv + cp.MaxTxnLife - 1
-
- tx := transactions.Transaction{
- Type: protocol.PaymentTx,
- Header: transactions.Header{
- Sender: fromAddr,
- Fee: basics.MicroAlgos{Raw: fee},
- FirstValid: basics.Round(fv),
- LastValid: basics.Round(lv),
- Lease: lease,
- Note: note,
- },
- PaymentTxnFields: transactions.PaymentTxnFields{
- Receiver: toAddr,
- Amount: basics.MicroAlgos{Raw: amount},
- },
- }
-
- // If requesting closing, put it in the transaction. The protocol might
- // not support it, but in that case, better to fail the transaction,
- // because the user explicitly asked for it, and it's not supported.
- if closeTo != "" {
- closeToAddr, err := basics.UnmarshalChecksumAddress(closeTo)
- if err != nil {
- return transactions.Transaction{}, err
+ optInsForAsset := pps.cinfo.OptIns[aidx]
+
+ var richest *pingPongAccount
+ var richestv uint64
+ var fromAcct *pingPongAccount
+ var toAcct *pingPongAccount
+ for i, addr := range optInsForAsset {
+ acct := pps.accounts[addr]
+ if acct.holdings[aidx] > richestv {
+ richestv = acct.holdings[aidx]
+ richest = acct
+ continue
+ }
+ if (acct.holdings[aidx] > 1000) && (fromAcct == nil || pReplace(i)) {
+ fromAcct = acct
+ continue
+ }
+ if toAcct == nil || pReplace(i) {
+ toAcct = acct
+ continue
}
+ }
+ if richest == nil {
+ err = fmt.Errorf("don't know any account holding asset %d", aidx)
+ return
+ }
+ if fromAcct == nil {
+ fromAcct = richest
+ }
+ if toAcct == nil {
+ toAcct = fromAcct
+ }
+
+ to := toAcct.pk.String()
+ from = fromAcct.pk.String()
+ sender = from
+ if to != from {
+ if toAcct.holdings[aidx] < 1000 && fromAcct.holdings[aidx] > 11000 {
+ amt = 10000
+ }
+ }
+ txn, err = client.MakeUnsignedAssetSendTx(aidx, amt, to, "", "")
+ if err != nil {
+ _, _ = fmt.Fprintf(os.Stdout, "error making unsigned asset send tx %v\n", err)
+ return
+ }
+ txn.Note = noteField[:]
+ txn.Lease = lease
+ txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, fee, txn)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Sending %d asset %d: %s -> %s\n", amt, aidx, sender, to)
+ }
+ update = &assetUpdate{
+ from: from,
+ to: to,
+ aidx: aidx,
+ amt: amt,
+ fee: fee,
+ }
+ return txn, sender, update, err
+}
+
+type appOptInUpdate struct {
+ addr string
+ aidx uint64
+}
+
+func (au *appOptInUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.addr].holdings[au.aidx] = 0
+ pps.cinfo.OptIns[au.aidx] = uniqueAppend(pps.cinfo.OptIns[au.aidx], au.addr)
+}
+
+type nopUpdate struct {
+}
+
+func (au *nopUpdate) apply(pps *WorkerState) {
+}
+
+var nopUpdateSingleton = &nopUpdate{}
+
+type assetUpdate struct {
+ from string
+ to string
+ aidx uint64
+ amt uint64
+ fee uint64
+}
- tx.PaymentTxnFields.CloseRemainderTo = closeToAddr
+func (au *assetUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
+ pps.accounts[au.from].holdings[au.aidx] -= au.amt
+ to := pps.accounts[au.to]
+ if to.holdings == nil {
+ to.holdings = make(map[uint64]uint64)
}
+ to.holdings[au.aidx] += au.amt
+}
+
+func (pps *WorkerState) constructAppTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ // select opted-in accounts for Txn.Accounts field
+ var accounts []string
+ aidx := pps.randAppID()
+ if aidx == 0 {
+ err = fmt.Errorf("no known apps")
+ return
+ }
+ appOptIns := pps.cinfo.OptIns[aidx]
+ sender = from
+ if len(appOptIns) > 0 {
+ indices := rand.Perm(len(appOptIns))
+ limit := 5
+ if len(indices) < limit {
+ limit = len(indices)
+ }
+ for i := 0; i < limit; i++ {
+ idx := indices[i]
+ accounts = append(accounts, appOptIns[idx])
+ }
+ if pps.cinfo.AppParams[aidx].Creator == from {
+ // if the application was created by the "from" account, then we don't need to worry about it being opted-in.
+ } else {
+ fromIsOptedIn := false
+ for i := 0; i < len(appOptIns); i++ {
+ if appOptIns[i] == from {
+ fromIsOptedIn = true
+ break
+ }
+ }
+ if !fromIsOptedIn {
+ sender = accounts[0]
+ from = sender
+ }
+ }
+ accounts = accounts[1:]
+ }
+ txn, err = client.MakeUnsignedAppNoOpTx(aidx, nil, accounts, nil, nil)
+ if err != nil {
+ return
+ }
+ txn.Note = noteField[:]
+ txn.Lease = lease
+ txn, err = client.FillUnsignedTxTemplate(from, 0, 0, fee, txn)
+ if !pps.cfg.Quiet {
+ _, _ = fmt.Fprintf(os.Stdout, "Calling app %d : %s\n", aidx, from)
+ }
+ update = &appUpdate{
+ from: from,
+ fee: fee,
+ }
+ return txn, sender, update, err
+}
+
+type appUpdate struct {
+ from string
+ fee uint64
+}
- tx.Header.GenesisID = params.GenesisID
+func (au *appUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
+}
+
+func (pps *WorkerState) constructNFTGenTxn(from, to string, fee uint64, client *libgoal.Client, noteField []byte, lease [32]byte) (txn transactions.Transaction, sender string, update txnUpdate, err error) {
+ if (len(pps.nftHolders) == 0) || ((float64(int(pps.cfg.NftAsaAccountInFlight)-len(pps.nftHolders)) / float64(pps.cfg.NftAsaAccountInFlight)) >= rand.Float64()) {
+ var addr string
- // Check if the protocol supports genesis hash
- if cp.SupportGenesisHash {
- copy(tx.Header.GenesisHash[:], params.GenesisHash)
+ var seed [32]byte
+ crypto.RandBytes(seed[:])
+ privateKey := crypto.GenerateSignatureSecrets(seed)
+ publicKey := basics.Address(privateKey.SignatureVerifier)
+
+ pps.accounts[publicKey.String()] = &pingPongAccount{
+ sk: privateKey,
+ pk: publicKey,
+ }
+ addr = publicKey.String()
+
+ fmt.Printf("new NFT holder %s\n", addr)
+ var proto config.ConsensusParams
+ proto, err = getProto(client)
+ if err != nil {
+ return
+ }
+ // enough for the per-asa minbalance and more than enough for the txns to create them
+ amount := proto.MinBalance * uint64(pps.cfg.NftAsaPerAccount+1) * 2
+ pps.nftHolders[addr] = 0
+ srcAcct := pps.acct(pps.cfg.SrcAccount)
+ sender = srcAcct.pk.String()
+		// fund the newly created holder (addr) rather than the unrelated "to" parameter
+		txn, err = client.ConstructPayment(sender, addr, fee, amount, noteField, "", [32]byte{}, 0, 0)
+		update = &paymentUpdate{
+			from: sender,
+			to:   addr,
+ fee: fee,
+ amt: amount,
+ }
+ return txn, sender, update, err
}
+	// pick a random sender from the NFT holder sub-accounts
+ pick := rand.Intn(len(pps.nftHolders))
+ pos := 0
+ var senderNftCount int
+ for addr, nftCount := range pps.nftHolders {
+ sender = addr
+ senderNftCount = nftCount
+ if pos == pick {
+ break
+ }
+ pos++
- // Default to the suggested fee, if the caller didn't supply it
- // Fee is tricky, should taken care last. We encode the final transaction to get the size post signing and encoding
- // Then, we multiply it by the suggested fee per byte.
- if fee == 0 {
- tx.Fee = basics.MulAIntSaturate(basics.MicroAlgos{Raw: params.Fee}, tx.EstimateEncodedSize())
}
- if tx.Fee.Raw < cp.MinTxnFee {
- tx.Fee.Raw = cp.MinTxnFee
+ var meta [32]byte
+ rand.Read(meta[:])
+ assetName := pps.nftSpamAssetName()
+ const totalSupply = 1
+ txn, err = client.MakeUnsignedAssetCreateTx(totalSupply, false, sender, sender, sender, sender, "ping", assetName, "", meta[:], 0)
+ if err != nil {
+ fmt.Printf("Cannot make asset create txn with meta %v\n", meta)
+ return
+ }
+ txn, err = client.FillUnsignedTxTemplate(sender, 0, 0, fee, txn)
+ if err != nil {
+ fmt.Printf("Cannot fill asset creation txn\n")
+ return
+ }
+ if senderNftCount+1 >= int(pps.cfg.NftAsaPerAccount) {
+ delete(pps.nftHolders, sender)
+ } else {
+ pps.nftHolders[sender] = senderNftCount + 1
+ }
+ update = &nftgenUpdate{
+		from: sender, // the holder sub-account pays the fee, not the "from" parameter
+ fee: fee,
}
+ return txn, sender, update, err
+}
- return tx, nil
+type nftgenUpdate struct {
+ from string
+ fee uint64
+}
+
+func (au *nftgenUpdate) apply(pps *WorkerState) {
+ pps.accounts[au.from].balance -= au.fee
}
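
The gate at the top of constructNFTGenTxn decides between minting from an existing holder and creating a fresh holder account, choosing creation with probability equal to the pool's relative shortfall against NftAsaAccountInFlight. A minimal sketch of just that check, with hypothetical names:

```go
import "math/rand"

// shouldAddHolder is true with probability equal to the pool's relative
// shortfall: always when the pool is empty, approaching never as the pool
// fills toward its target size.
func shouldAddHolder(current, target int) bool {
	if current == 0 {
		return true
	}
	shortfall := float64(target-current) / float64(target)
	return shortfall >= rand.Float64()
}
```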
func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig) (stxn transactions.SignedTxn, err error) {
@@ -1203,7 +1244,7 @@ func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig
if cfg.Rekey {
stxn, err = txn.Sign(signer.sk), nil
- } else if len(cfg.Program) > 0 {
+ } else if len(cfg.Program) > 0 && rand.Float64() < cfg.ProgramProbability {
// If there's a program, sign it and use that in a lsig
progb := logic.Program(cfg.Program)
psig = signer.sk.Sign(&progb)
@@ -1220,53 +1261,3 @@ func signTxn(signer *pingPongAccount, txn transactions.Transaction, cfg PpConfig
}
return
}
-
-type timeCount struct {
- when time.Time
- count int
-}
-
-type throttler struct {
- times []timeCount
-
- next int
-
- // target x per-second
- xps float64
-
- // rough proportional + integral control
- iterm float64
-}
-
-func newThrottler(windowSize int, targetPerSecond float64) *throttler {
- return &throttler{times: make([]timeCount, windowSize), xps: targetPerSecond, iterm: 0.0}
-}
-
-func (t *throttler) maybeSleep(count int) {
- now := time.Now()
- t.times[t.next].when = now
- t.times[t.next].count = count
- nn := (t.next + 1) % len(t.times)
- t.next = nn
- if t.times[nn].when.IsZero() {
- return
- }
- dt := now.Sub(t.times[nn].when)
- countsum := 0
- for i, tc := range t.times {
- if i != nn {
- countsum += tc.count
- }
- }
- rate := float64(countsum) / dt.Seconds()
- if rate > t.xps {
- // rate too high, slow down
- desiredSeconds := float64(countsum) / t.xps
- extraSeconds := desiredSeconds - dt.Seconds()
- t.iterm += 0.1 * extraSeconds / float64(len(t.times))
- util.NanoSleep(time.Duration(1000000000.0 * (extraSeconds + t.iterm) / float64(len(t.times))))
-
- } else {
- t.iterm *= 0.95
- }
-}
diff --git a/stateproof/worker_test.go b/stateproof/worker_test.go
index 4c35c8c693..9145141179 100644
--- a/stateproof/worker_test.go
+++ b/stateproof/worker_test.go
@@ -21,7 +21,7 @@ import (
"database/sql"
"encoding/binary"
"fmt"
- "io/ioutil"
+ "io"
"strings"
"sync"
"testing"
@@ -573,7 +573,7 @@ func TestSignerDoesntDeleteKeysWhenDBDoesntStoreSigs(t *testing.T) {
dbs, _ := dbOpenTest(t, true)
logger := logging.NewLogger()
- logger.SetOutput(ioutil.Discard)
+ logger.SetOutput(io.Discard)
w := NewWorker(dbs.Wdb, logger, s, s, s, s)
diff --git a/test/commandandcontrol/cc_agent/component/pingPongComponent.go b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
index 7f992e793b..5bb4be8907 100644
--- a/test/commandandcontrol/cc_agent/component/pingPongComponent.go
+++ b/test/commandandcontrol/cc_agent/component/pingPongComponent.go
@@ -20,7 +20,7 @@ import (
"context"
"encoding/json"
"fmt"
- "io/ioutil"
+ "os"
"time"
"github.com/algorand/go-algorand/libgoal"
@@ -101,7 +101,7 @@ func (componentInstance *PingPongComponentInstance) Terminate() (err error) {
func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.PpConfig) (err error) {
// Make a cache dir for wallet handle tokens
- cacheDir, err := ioutil.TempDir(GetHostAgent().TempDir, PINGPONG)
+ cacheDir, err := os.MkdirTemp(GetHostAgent().TempDir, PINGPONG)
if err != nil {
log.Errorf("Cannot make temp dir: %v\n", err)
return
@@ -124,7 +124,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
	// Initialize accounts if necessary; this may take several attempts while waiting for previous transactions to settle
for i := 0; i < 10; i++ {
- err = pps.PrepareAccounts(ac)
+ err = pps.PrepareAccounts(&ac)
if err == nil {
break
} else {
@@ -143,7 +143,7 @@ func (componentInstance *PingPongComponentInstance) startPingPong(cfg *pingpong.
componentInstance.ctx, componentInstance.cancelFunc = context.WithCancel(context.Background())
// Kick off the real processing
- go pps.RunPingPong(componentInstance.ctx, ac)
+ go pps.RunPingPong(componentInstance.ctx, &ac)
return
}
diff --git a/test/commandandcontrol/cc_client/main.go b/test/commandandcontrol/cc_client/main.go
index c45fec1b8f..817afb850a 100644
--- a/test/commandandcontrol/cc_client/main.go
+++ b/test/commandandcontrol/cc_client/main.go
@@ -19,7 +19,6 @@ package main
import (
"flag"
"fmt"
- "io/ioutil"
"net/url"
"os"
"os/signal"
@@ -65,7 +64,7 @@ func main() {
os.Exit(1)
}
- options, err := ioutil.ReadFile(*componentOptions)
+ options, err := os.ReadFile(*componentOptions)
if err != nil {
log.Errorf("failed to read options file %s", *componentOptions)
}
diff --git a/test/e2e-go/cli/algod/expect/algod_expect_test.go b/test/e2e-go/cli/algod/expect/algod_expect_test.go
index ec69fc715c..06d54b9f32 100644
--- a/test/e2e-go/cli/algod/expect/algod_expect_test.go
+++ b/test/e2e-go/cli/algod/expect/algod_expect_test.go
@@ -19,12 +19,12 @@ import (
"testing"
"github.com/algorand/go-algorand/test/framework/fixtures"
- "github.com/algorand/go-algorand/test/partitiontest"
)
// TestAlgodWithExpect Process all expect script files with suffix Test.exp within the test/e2e-go/cli/algod/expect directory
func TestAlgodWithExpect(t *testing.T) {
- partitiontest.PartitionTest(t)
+ // partitiontest.PartitionTest(t)
+ // Causes double partition, so commented out on purpose
defer fixtures.ShutdownSynchronizedTest(t)
et := fixtures.MakeExpectTest(t)
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
index be6fc0ab31..d26074e2fa 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupTest.exp
@@ -37,7 +37,7 @@ proc spawnCatchpointCatchupWebProxy { TARGET_ENDPOINT RUNTIME REQUEST_DELAY } {
eof { ::AlgorandGoal::CheckEOF "web proxy failed to start"}
}
- puts "Web proxy listening address is $WEBPROXY_LISTEN_ADDRESS"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Web proxy listening address is $WEBPROXY_LISTEN_ADDRESS"
return $WEBPROXY_LISTEN_ADDRESS
}
@@ -105,7 +105,7 @@ if { [catch {
exit 1
}
- puts "Primary node listening address is $PRIMARY_LISTEN_ADDRESS"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Primary node listening address is $PRIMARY_LISTEN_ADDRESS"
# start the web proxy
set WP_SPAWN_ID 0
@@ -120,11 +120,11 @@ if { [catch {
set CATCHPOINT [::AlgorandGoal::GetNodeLastCatchpoint $TEST_ROOT_DIR/Primary]
- puts "Catchpoint is $CATCHPOINT"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Catchpoint is $CATCHPOINT"
regexp -nocase {([0-9]*)#[A-Z2-7]*} $CATCHPOINT CATCHPOINT_ROUND CATCHPOINT_ROUND
- puts "Catchpoint round is $CATCHPOINT_ROUND"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Catchpoint round is $CATCHPOINT_ROUND"
# wait for the primary to reach $CATCHPOINT_ROUND + 5, so that the catchpoint file would be saved
::AlgorandGoal::WaitForRound [expr {int($CATCHPOINT_ROUND + 5)}] $TEST_ROOT_DIR/Primary
@@ -138,7 +138,7 @@ if { [catch {
# close the web proxy
close -i $WP_SPAWN_ID
- puts "catchpointCatchupTest basic test completed"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: catchpointCatchupTest basic test completed"
} EXCEPTION ] } {
::AlgorandGoal::Abort "ERROR in catchpointCatchupTest - basic test: $EXCEPTION"
@@ -191,7 +191,7 @@ if { [catch {
::AlgorandGoal::StopNode $TEST_ROOT_DIR/Primary
- puts "catchpointCatchupTest stop/start test completed"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: catchpointCatchupTest stop/start test completed"
} EXCEPTION ] } {
::AlgorandGoal::Abort "ERROR in catchpointCatchupTest - stop/start: $EXCEPTION"
}
diff --git a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
index 8c574c4b1e..3ba4764383 100644
--- a/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
+++ b/test/e2e-go/cli/goal/expect/catchpointCatchupWebProxy/webproxy.go
@@ -58,7 +58,7 @@ func main() {
// prevent requests for block #2 to go through.
if strings.HasSuffix(request.URL.String(), "/block/2") {
response.WriteHeader(http.StatusBadRequest)
- response.Write([]byte("webProxy prevents block 2 from serving"))
+ response.Write([]byte("webProxy prevents block 2 from serving")) //nolint:errcheck // don't care
return
}
if *webProxyLogFile != "" {
diff --git a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
index f528dabb19..2f2f4f8265 100644
--- a/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
+++ b/test/e2e-go/cli/goal/expect/goalExpectCommon.exp
@@ -44,48 +44,54 @@ package require Tcl 8.0
# Utility method to abort out of this script
proc ::AlgorandGoal::Abort { ERROR } {
- puts "Aborting with Error: $ERROR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Aborting with Error: $ERROR"
+ set LOGS_COLLECTED 0
if { [info exists ::GLOBAL_TEST_ROOT_DIR] } {
# terminate child algod processes, if there are active child processes the test will hang on a test failure
puts "GLOBAL_TEST_ROOT_DIR $::GLOBAL_TEST_ROOT_DIR"
puts "GLOBAL_NETWORK_NAME $::GLOBAL_NETWORK_NAME"
+ ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
+
log_user 1
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Primary
- if { [info exists ::NODE_DATA_DIR] } {
+ if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
- puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
+ puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ set LOGS_COLLECTED 1
}
set NODE_DATA_DIR $::GLOBAL_TEST_ROOT_DIR/Node
- if { [info exists ::NODE_DATA_DIR] } {
+ puts "Node path $NODE_DATA_DIR"
+ if { [file exists $NODE_DATA_DIR] } {
set outLog [exec cat $NODE_DATA_DIR/algod-out.log]
- puts "$NODE_DATA_DIR/algod-out.log :\r\n$outLog"
+ puts "\n$NODE_DATA_DIR/algod-out.log:\r\n$outLog"
set errLog [exec cat $NODE_DATA_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $NODE_DATA_DIR/node.log]
- puts "$NODE_DATA_DIR/node.log :\r\n$nodeLog"
+ puts "\n$NODE_DATA_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $NODE_DATA_DIR/node.log]
+ puts "\n$NODE_DATA_DIR/node.log:\r\n$nodeLog"
+ set LOGS_COLLECTED 1
}
-
- ::AlgorandGoal::StopNetwork $::GLOBAL_NETWORK_NAME $::GLOBAL_TEST_ROOT_DIR
}
if { [info exists ::GLOBAL_TEST_ALGO_DIR] } {
puts "GLOBAL_TEST_ALGO_DIR $::GLOBAL_TEST_ALGO_DIR"
- log_user 1
- set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
- puts "$::GLOBAL_TEST_ALGO_DIR/algod-out.log :\r\n$outLog"
- set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
- puts "$NODE_DATA_DIR/algod-err.log :\r\n$errLog"
- set nodeLog [exec -- tail -n 30 $::GLOBAL_TEST_ALGO_DIR/node.log]
- puts "$::GLOBAL_TEST_ALGO_DIR/node.log :\r\n$nodeLog"
-
::AlgorandGoal::StopNode $::GLOBAL_TEST_ALGO_DIR
+
+ if { $LOGS_COLLECTED == 0 } {
+ log_user 1
+ set outLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-out.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-out.log:\r\n$outLog"
+ set errLog [exec cat $::GLOBAL_TEST_ALGO_DIR/algod-err.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/algod-err.log:\r\n$errLog"
+ set nodeLog [exec -- tail -n 50 $::GLOBAL_TEST_ALGO_DIR/node.log]
+ puts "\n$::GLOBAL_TEST_ALGO_DIR/node.log:\r\n$nodeLog"
+ }
}
exit 1
@@ -137,7 +143,7 @@ proc ::AlgorandGoal::StartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED "False"} {PEER_A
set GOAL_PARAMS "$GOAL_PARAMS -p $PEER_ADDRESS"
}
if { [catch {
- puts "node start with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node start with $TEST_ALGO_DIR"
spawn goal {*}$GOAL_PARAMS
if { $SYSTEMD_MANAGED eq "True" } {
expect {
@@ -164,7 +170,7 @@ proc ::AlgorandGoal::StopNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
set timeout 15
if { [catch {
- puts "node stop with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node stop with $TEST_ALGO_DIR"
if { $SYSTEMD_MANAGED eq "" } {
spawn goal node stop -d $TEST_ALGO_DIR
expect {
@@ -192,7 +198,7 @@ proc ::AlgorandGoal::RestartNode { TEST_ALGO_DIR {SYSTEMD_MANAGED ""} } {
set timeout 30
if { [catch {
- puts "node restart with $TEST_ALGO_DIR"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node restart with $TEST_ALGO_DIR"
if { $SYSTEMD_MANAGED eq "" } {
spawn goal node restart -d $TEST_ALGO_DIR
expect {
@@ -241,7 +247,7 @@ proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ROOT_DIR
if { [catch {
# Start network
- puts "network start $NETWORK_NAME"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: network start $NETWORK_NAME"
spawn goal network start -r $TEST_ROOT_DIR
expect {
timeout { close; ::AlgorandGoal::Abort "Timed out starting network" }
@@ -272,7 +278,7 @@ proc ::AlgorandGoal::StartNetwork { NETWORK_NAME NETWORK_TEMPLATE TEST_ROOT_DIR
proc ::AlgorandGoal::StopNetwork { NETWORK_NAME TEST_ROOT_DIR } {
set timeout 60
set NETWORK_STOP_MESSAGE ""
- puts "Stopping network: $NETWORK_NAME"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Stopping network: $NETWORK_NAME"
spawn goal network stop -r $TEST_ROOT_DIR
expect {
timeout {
@@ -896,7 +902,7 @@ proc ::AlgorandGoal::GetNodeLastCommittedBlock { NODE_DATA_DIR } {
proc ::AlgorandGoal::StartCatchup { NODE_DATA_DIR CATCHPOINT } {
if { [catch {
# start catchup
- puts "spawn node catchup $CATCHPOINT"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: spawn node catchup $CATCHPOINT"
spawn goal node catchup $CATCHPOINT -d $NODE_DATA_DIR
expect {
timeout { ::AlgorandGoal::Abort "goal node catchup timed out" }
@@ -914,7 +920,7 @@ proc ::AlgorandGoal::WaitCatchup { TEST_PRIMARY_NODE_DIR WAIT_DURATION_SEC } {
set i 0
while { $i < $WAIT_DURATION_SEC } {
# Check node status
- puts "spawn node status "
+ puts "[clock format [clock seconds] -format %H:%M:%S]: spawn node status "
spawn goal node status -d $TEST_PRIMARY_NODE_DIR
expect {
timeout { ::AlgorandGoal::Abort "goal node status timed out" }
@@ -967,22 +973,22 @@ proc ::AlgorandGoal::WaitForRound { WAIT_FOR_ROUND_NUMBER NODE_DATA_DIR } {
eof {
catch wait result;
if { [lindex $result 3] != 0 } {
- ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3]"
+ ::AlgorandGoal::Abort "failed to wait for round : error code [lindex $result 3], output: $expect_out(buffer)"
}
}
}
log_user 1
if { $BLOCK > -1 } {
- puts "node status check complete, current round is $BLOCK"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: node status check complete, current round is $BLOCK"
} else {
::AlgorandGoal::Abort "failed to retrieve block round number"
}
# Check if the round number is reached
if { $BLOCK >= $WAIT_FOR_ROUND_NUMBER } {
- puts "Reached Round number: $WAIT_FOR_ROUND_NUMBER"; break
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Reached Round number: $WAIT_FOR_ROUND_NUMBER"; break
} else {
- puts "Current Round: '$BLOCK' is less than wait for round: '$WAIT_FOR_ROUND_NUMBER'"
+ puts "[clock format [clock seconds] -format %H:%M:%S]: Current Round: '$BLOCK' is less than wait for round: '$WAIT_FOR_ROUND_NUMBER'"
if { $LAST_ROUND >= $BLOCK } {
# no progress was made since last time we checked.
incr SLEEP_TIME
diff --git a/test/e2e-go/cli/goal/expect/goalFormattingTest.exp b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
index 054406479d..cfa0af63f8 100644
--- a/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
+++ b/test/e2e-go/cli/goal/expect/goalFormattingTest.exp
@@ -26,7 +26,7 @@ if { [catch {
set NON_PRINTABLE_CHARS_WARNING 1
exp_continue
}
- {Cannot decode transactions from *: msgpack decode error \[pos 33\]: no matching struct field found when decoding stream map with key \[0G\[0K\[33munexpected_key\[0m} {
+ {Cannot decode transactions from *: Unknown field: \[0G\[0K\[33munexpected_key\[0m} {
set CANNOT_DECODE_MESSAGE 1
exp_continue
}
diff --git a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp b/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
deleted file mode 100644
index 6414f05f40..0000000000
--- a/test/e2e-go/cli/goal/expect/statefulTealAppInfoTest.exp
+++ /dev/null
@@ -1,159 +0,0 @@
-#!/usr/bin/expect -f
-#exp_internal 1
-set err 0
-log_user 1
-
-source goalExpectCommon.exp
-
-set TEST_ALGO_DIR [lindex $argv 0]
-set TEST_DATA_DIR [lindex $argv 1]
-
-proc statefulTealAppInfoTest { TEST_ALGO_DIR TEST_DATA_DIR} {
-
- set timeout 60
- set TIME_STAMP [clock seconds]
-
- set TEST_ROOT_DIR $TEST_ALGO_DIR/root_$TIME_STAMP
- set TEST_PRIMARY_NODE_DIR $TEST_ROOT_DIR/Primary/
- set NETWORK_NAME test_net_expect_$TIME_STAMP
- set NETWORK_TEMPLATE "$TEST_DATA_DIR/nettemplates/TwoNodes50EachFuture.json"
-
- exec cp $TEST_DATA_DIR/../../installer/genesis/devnet/genesis.json $TEST_ALGO_DIR
-
- # Create network
- ::AlgorandGoal::CreateNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ALGO_DIR $TEST_ROOT_DIR
-
- # Start network
- ::AlgorandGoal::StartNetwork $NETWORK_NAME $NETWORK_TEMPLATE $TEST_ROOT_DIR
-
- set PRIMARY_NODE_ADDRESS [ ::AlgorandGoal::GetAlgodNetworkAddress $TEST_PRIMARY_NODE_DIR ]
- puts "Primary Node Address: $PRIMARY_NODE_ADDRESS"
-
- set PRIMARY_WALLET_NAME unencrypted-default-wallet
-
- # Determine primary account
- set PRIMARY_ACCOUNT_ADDRESS [::AlgorandGoal::GetHighestFundedAccountForWallet $PRIMARY_WALLET_NAME $TEST_PRIMARY_NODE_DIR]
-
- # Check the balance of the primary account
- set PRIMARY_ACCOUNT_BALANCE [::AlgorandGoal::GetAccountBalance $PRIMARY_WALLET_NAME $PRIMARY_ACCOUNT_ADDRESS $TEST_PRIMARY_NODE_DIR]
- puts "Primary Account Balance: $PRIMARY_ACCOUNT_BALANCE"
-
- ::AlgorandGoal::WaitForRound 1 $TEST_PRIMARY_NODE_DIR
-
- set TEAL_PROGS_DIR "$TEST_DATA_DIR/../scripts/e2e_subs/tealprogs"
-
- # Network Setup complete
- #----------------------
-
- puts "calling app compile"
- ::AlgorandGoal::AppCompile ${TEAL_PROGS_DIR}/upgraded.teal ${TEST_ROOT_DIR}/upgraded.tealc $TEST_PRIMARY_NODE_DIR
- puts "computing target hash"
-
- puts "compute target hash"
- set TARGET_HASH [ exec shasum -a 256 "${TEST_ROOT_DIR}/upgraded.tealc" | awk {{print $1}} ]
- puts "TARGET_HASH ${TARGET_HASH}"
-
- # Compile dummy, wrong contract
- ::AlgorandGoal::AppCompile ${TEAL_PROGS_DIR}/wrongupgrade.teal ${TEST_ROOT_DIR}/wrongupgrade.tealc $TEST_PRIMARY_NODE_DIR
-
- # Copy template
- exec cp ${TEAL_PROGS_DIR}/bootloader.teal.tmpl ${TEST_ROOT_DIR}/bootloader.teal
-
- # Substitute template values
- exec sed -i"" -e "s/TMPL_APPROV_HASH/${TARGET_HASH}/g" ${TEST_ROOT_DIR}/bootloader.teal
- exec sed -i"" -e "s/TMPL_CLEARSTATE_HASH/${TARGET_HASH}/g" ${TEST_ROOT_DIR}/bootloader.teal
-
- # Create an app using filled-in bootloader template
- puts "calling app create"
- set GLOBAL_BYTE_SLICES 1
- set LOCAL_BYTE_SLICES 0
- set APP_ID [::AlgorandGoal::AppCreate0 $PRIMARY_WALLET_NAME "" $PRIMARY_ACCOUNT_ADDRESS ${TEST_ROOT_DIR}/bootloader.teal $GLOBAL_BYTE_SLICES $LOCAL_BYTE_SLICES ${TEAL_PROGS_DIR}/clear_program_state.teal $TEST_PRIMARY_NODE_DIR]
-
- # Application setup complete
- #----------------------
-
- # Calling app as an update but with right scripts should succeed
- spawn goal app info --app-id $APP_ID -d $TEST_PRIMARY_NODE_DIR
- expect {
- timeout { puts timeout; ::AlgorandGoal::Abort "\n Failed to see expected output" }
- -re {^Application ID:\s+(\d+)\r\n} {set APP_INFO_ID $expect_out(1,string) ; exp_continue }
- -re {Creator:\s+([A-Z0-9]+)\r\n} {set APP_INFO_CREATOR $expect_out(1,string) ; exp_continue }
- -re {Approval hash:\s+([A-Z0-9]+)\r\n} {set APP_INFO_APPROVAL_HASH $expect_out(1,string); exp_continue }
- -re {Clear hash:\s+([A-Z0-9]+)\r\n} {set APP_INFO_CLEAR_HASH $expect_out(1,string); exp_continue }
- -re {Max global byteslices:\s+(\d+)\r\n} {set APP_INFO_GLOBAL_BYTESLICES $expect_out(1,string); exp_continue }
- -re {Max global integers:\s+(\d+)\r\n} {set APP_INFO_GLOBAL_INTEGERS $expect_out(1,string) ; exp_continue }
- -re {Max local byteslices:\s+(\d+)\r\n} {set APP_INFO_LOCAL_BYTESLICES $expect_out(1,string) ; exp_continue }
- -re {Max local integers:\s+(\d+)\r\n} {set APP_INFO_LOCAL_INTEGERS $expect_out(1,string) ; close }
- eof {close; ::AlgorandGoal::Abort "app update failed" }
- }
- puts "APP_INFO_ID $APP_INFO_ID"
- puts "APP_INFO_CREATOR $APP_INFO_CREATOR"
- puts "APP_INFO_APPROVAL_HASH $APP_INFO_APPROVAL_HASH"
- puts "APP_INFO_CLEAR_HASH $APP_INFO_CLEAR_HASH"
- puts "APP_INFO_GLOBAL_BYTESLICES $APP_INFO_GLOBAL_BYTESLICES"
- puts "APP_INFO_GLOBAL_INTEGERS $APP_INFO_GLOBAL_INTEGERS"
- puts "APP_INFO_LOCAL_BYTESLICES $APP_INFO_LOCAL_BYTESLICES"
- puts "APP_INFO_LOCAL_INTEGERS $APP_INFO_LOCAL_INTEGERS"
-
- set errors 0
- if { $APP_INFO_ID != $APP_ID } {
- puts "error APP_INFO_ID $APP_INFO_ID does not match expected $APP_ID" ; incr errors
- }
- if { $APP_INFO_CREATOR != $PRIMARY_ACCOUNT_ADDRESS } {
- puts "error APP_INFO_CREATOR $APP_INFO_CREATOR does not match expected $PRIMARY_ACCOUNT_ADDRESS" ; incr errors
- }
- set EXPECTED_APP_INFO_APPROVAL_HASH "AJM7G3WXKKL6YTITFNRYT53HRFKHKWGTEZF6UZXKSUNO6GI7FOBCA7LDTU"
- if { $APP_INFO_APPROVAL_HASH != "AJM7G3WXKKL6YTITFNRYT53HRFKHKWGTEZF6UZXKSUNO6GI7FOBCA7LDTU" } {
- puts "error APP_INFO_APPROVAL_HASH $APP_INFO_APPROVAL_HASH does not match expected $EXPECTED_APP_INFO_APPROVAL_HASH" ; incr errors
- }
- set EXPECTED_APP_INFO_CLEAR_HASH "YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA"
- if { $APP_INFO_CLEAR_HASH != $EXPECTED_APP_INFO_CLEAR_HASH } {
- puts "error APP_INFO_CLEAR_HASH $APP_INFO_CLEAR_HASH does not match expected $EXPECTED_APP_INFO_CLEAR_HASH" ; incr errors
- }
- set EXPECTED_APP_INFO_GLOBAL_BYTESLICES 1
- if { $APP_INFO_GLOBAL_BYTESLICES != $EXPECTED_APP_INFO_GLOBAL_BYTESLICES } {
- puts "error APP_INFO_GLOBAL_BYTESLICES $APP_INFO_GLOBAL_BYTESLICES does not match expected $EXPECTED_APP_INFO_GLOBAL_BYTESLICES" ; incr errors
- }
- set EXPECTED_APP_INFO_GLOBAL_INTEGERS 0
- if { $APP_INFO_GLOBAL_INTEGERS != $EXPECTED_APP_INFO_GLOBAL_INTEGERS } {
- puts "error APP_INFO_GLOBAL_INTEGERS $APP_INFO_GLOBAL_INTEGERS does not match expected $EXPECTED_APP_INFO_GLOBAL_INTEGERS" ; incr errors
- }
- set EXPECTED_APP_INFO_LOCAL_BYTESLICES 0
- if { $APP_INFO_LOCAL_BYTESLICES != $EXPECTED_APP_INFO_LOCAL_BYTESLICES } {
- puts "error APP_INFO_LOCAL_BYTESLICES $APP_INFO_LOCAL_BYTESLICES does not match expected $EXPECTED_APP_INFO_LOCAL_BYTESLICES" ; incr errors
- }
- set EXPECTED_APP_INFO_LOCAL_INTEGERS 0
- if { $APP_INFO_LOCAL_INTEGERS != $EXPECTED_APP_INFO_LOCAL_INTEGERS } {
- puts "error APP_INFO_LOCAL_INTEGERS $APP_INFO_LOCAL_INTEGERS does not match expected $EXPECTED_APP_INFO_LOCAL_INTEGERS" ; incr errors
- }
-
- if { $errors > 0 } {
- puts "there were a total of $errors"
- ::AlgorandGoal::Abort "ERROR in statefulTealAppInfoTest"
- } else {
- puts "app info test was successful"
- }
-
- # Shutdown the network
- ::AlgorandGoal::StopNetwork $NETWORK_NAME $TEST_ROOT_DIR
-
- puts "Goal statefulTealAppInfoTest Successful"
-
-}
-
-
-if { [catch {
- source goalExpectCommon.exp
-
- puts "starting statefulTealAppInfoTest"
-
- puts "TEST_ALGO_DIR: $TEST_ALGO_DIR"
- puts "TEST_DATA_DIR: $TEST_DATA_DIR"
-
- statefulTealAppInfoTest $TEST_ALGO_DIR $TEST_DATA_DIR
-
- exit 0
-
-} EXCEPTION ] } {
- ::AlgorandGoal::Abort "ERROR in statefulTealAppInfoTest: $EXCEPTION"
-}
diff --git a/test/e2e-go/features/participation/participationRewards_test.go b/test/e2e-go/features/participation/participationRewards_test.go
index a7dd6452e4..3c8a293714 100644
--- a/test/e2e-go/features/participation/participationRewards_test.go
+++ b/test/e2e-go/features/participation/participationRewards_test.go
@@ -347,8 +347,7 @@ func TestRewardRateRecalculation(t *testing.T) {
r.NoError(err)
minFee, minBal, err := fixture.MinFeeAndBalance(curStatus.LastRound)
r.NoError(err)
- deadline := curStatus.LastRound + uint64(5)
- fixture.SendMoneyAndWait(deadline, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
+ fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
blk, err := client.Block(curStatus.LastRound)
r.NoError(err)
@@ -373,8 +372,7 @@ func TestRewardRateRecalculation(t *testing.T) {
curStatus, err = client.Status()
r.NoError(err)
- deadline = curStatus.LastRound + uint64(5)
- fixture.SendMoneyAndWait(deadline, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
+ fixture.SendMoneyAndWait(curStatus.LastRound, amountToSend, minFee, richAccount.Address, rewardsAccount, "")
rewardRecalcRound = rewardRecalcRound + consensusParams.RewardsRateRefreshInterval
diff --git a/test/e2e-go/features/stateproofs/stateproofs_test.go b/test/e2e-go/features/stateproofs/stateproofs_test.go
index 456c7b3cbf..a3b72180f1 100644
--- a/test/e2e-go/features/stateproofs/stateproofs_test.go
+++ b/test/e2e-go/features/stateproofs/stateproofs_test.go
@@ -19,7 +19,6 @@ package stateproofs
import (
"bytes"
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"runtime"
@@ -641,7 +640,7 @@ func TestUnableToRecoverFromLaggingStateProofChain(t *testing.T) {
// installParticipationKey generates a new key for a given account and installs it with the client.
func installParticipationKey(t *testing.T, client libgoal.Client, addr string, firstValid, lastValid uint64) (resp generated.PostParticipationResponse, part account.Participation, err error) {
- dir, err := ioutil.TempDir("", "temporary_partkey_dir")
+ dir, err := os.MkdirTemp("", "temporary_partkey_dir")
require.NoError(t, err)
defer os.RemoveAll(dir)
@@ -676,14 +675,10 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco
// After making the first Stateproof, we transfer three-quarters of the stake of the
// rich node to the poor node. For both cases, we assert different stakes, that is, to
// conclude whether the poor node is used to create the StateProof or the rich node.
-func TestAttestorsChangeTest(t *testing.T) {
+func TestAttestorsChange(t *testing.T) {
partitiontest.PartitionTest(t)
defer fixtures.ShutdownSynchronizedTest(t)
- if runtime.GOARCH == "arm" || runtime.GOARCH == "arm64" {
- t.Skip("This test is difficult for ARM")
- }
-
a := require.New(fixtures.SynchronizedTest(t))
consensusParams := getDefaultStateProofConsensusParams()
@@ -718,7 +713,7 @@ func TestAttestorsChangeTest(t *testing.T) {
from: accountFetcher{nodeName: "richNode", accountNumber: 0},
to: accountFetcher{nodeName: "poorNode", accountNumber: 0},
}
-
+ sum := uint64(0)
for rnd := uint64(1); rnd <= consensusParams.StateProofInterval*(expectedNumberOfStateProofs+1); rnd++ {
// Changing the amount to pay. This should transfer most of the money from the rich node to the poor node.
if consensusParams.StateProofInterval*2 == rnd {
@@ -739,15 +734,10 @@ func TestAttestorsChangeTest(t *testing.T) {
blk, err := libgoal.BookkeepingBlock(rnd)
a.NoErrorf(err, "failed to retrieve block from algod on round %d", rnd)
- if (rnd % consensusParams.StateProofInterval) == 0 {
- // Must have a merkle commitment for participants
- a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
- a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
-
- stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
-
+		// We sample the accounts' balances StateProofVotersLookback rounds before the state proof round.
+ if (rnd+consensusParams.StateProofVotersLookback)%consensusParams.StateProofInterval == 0 {
+ sum = 0
// the main part of the test (computing the total stake of the nodes):
- sum := uint64(0)
for i := 1; i <= 3; i++ {
sum += accountFetcher{fmt.Sprintf("Node%d", i), 0}.getBalance(a, &fixture)
}
@@ -755,6 +745,14 @@ func TestAttestorsChangeTest(t *testing.T) {
richNodeStake := accountFetcher{"richNode", 0}.getBalance(a, &fixture)
poorNodeStake := accountFetcher{"poorNode", 0}.getBalance(a, &fixture)
sum = sum + richNodeStake + poorNodeStake
+ }
+
+ if (rnd % consensusParams.StateProofInterval) == 0 {
+ // Must have a merkle commitment for participants
+ a.True(len(blk.StateProofTracking[protocol.StateProofBasic].StateProofVotersCommitment) > 0)
+ a.True(blk.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight != basics.MicroAlgos{})
+
+ stake := blk.BlockHeader.StateProofTracking[protocol.StateProofBasic].StateProofOnlineTotalWeight.ToUint64()
a.Equal(sum, stake)
diff --git a/test/e2e-go/features/transactions/asset_test.go b/test/e2e-go/features/transactions/asset_test.go
index 520f8af0e0..9616f3cb05 100644
--- a/test/e2e-go/features/transactions/asset_test.go
+++ b/test/e2e-go/features/transactions/asset_test.go
@@ -21,6 +21,7 @@ import (
"path/filepath"
"strings"
"testing"
+ "time"
"github.com/stretchr/testify/require"
@@ -970,6 +971,13 @@ func TestAssetCreateWaitRestartDelete(t *testing.T) {
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
+ // Ensure manager is funded before submitting any transactions
+ currentRound, err := client.CurrentRound()
+ a.NoError(err)
+
+ err = fixture.WaitForAccountFunded(currentRound+5, manager)
+ a.NoError(err)
+
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
a.NoError(err)
@@ -1009,6 +1017,8 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
consensusParams.SeedLookback = 2
consensusParams.SeedRefreshInterval = 8
consensusParams.MaxBalLookback = 2 * consensusParams.SeedLookback * consensusParams.SeedRefreshInterval // 32
+ consensusParams.AgreementFilterTimeoutPeriod0 = 400 * time.Millisecond
+ consensusParams.AgreementFilterTimeout = 400 * time.Millisecond
configurableConsensus[consensusVersion] = consensusParams
@@ -1059,6 +1069,13 @@ func TestAssetCreateWaitBalLookbackDelete(t *testing.T) {
verifyAssetParameters(asset, "test", "testunit", manager, reserve, freeze, clawback,
assetMetadataHash, assetURL, a)
+ // Ensure manager is funded before submitting any transactions
+ currentRound, err := client.CurrentRound()
+ a.NoError(err)
+
+ err = fixture.WaitForAccountFunded(currentRound+5, manager)
+ a.NoError(err)
+
// Destroy the asset
tx, err := client.MakeUnsignedAssetDestroyTx(assetIndex)
a.NoError(err)
@@ -1169,8 +1186,8 @@ func verifyAssetParameters(asset v1.AssetParams,
unitName, assetName, manager, reserve, freeze, clawback string,
metadataHash []byte, assetURL string, asser *require.Assertions) {
- asser.Equal(asset.UnitName, "test")
- asser.Equal(asset.AssetName, "testunit")
+ asser.Equal(asset.UnitName, unitName)
+ asser.Equal(asset.AssetName, assetName)
asser.Equal(asset.ManagerAddr, manager)
asser.Equal(asset.ReserveAddr, reserve)
asser.Equal(asset.FreezeAddr, freeze)
diff --git a/test/framework/fixtures/baseFixture.go b/test/framework/fixtures/baseFixture.go
index 37086e5365..a2205d3c63 100644
--- a/test/framework/fixtures/baseFixture.go
+++ b/test/framework/fixtures/baseFixture.go
@@ -18,7 +18,6 @@ package fixtures
import (
"fmt"
- "io/ioutil"
"os"
"path"
"runtime"
@@ -55,7 +54,7 @@ func (f *baseFixture) initialize(instance Fixture) {
}
f.testDir = os.Getenv("TESTDIR")
if f.testDir == "" {
- f.testDir, _ = ioutil.TempDir("", "tmp")
+ f.testDir, _ = os.MkdirTemp("", "tmp")
f.testDirTmp = true
}
f.testDataDir = os.Getenv("TESTDATADIR")
diff --git a/test/framework/fixtures/expectFixture.go b/test/framework/fixtures/expectFixture.go
index 3d7293d404..35b7489689 100644
--- a/test/framework/fixtures/expectFixture.go
+++ b/test/framework/fixtures/expectFixture.go
@@ -18,7 +18,6 @@ package fixtures
import (
"bytes"
- "fmt"
"os"
"os/exec"
"path"
@@ -148,7 +147,7 @@ func (ef *ExpectFixture) Run() {
if match, _ := regexp.MatchString(ef.testFilter, testName); match {
ef.t.Run(testName, func(t *testing.T) {
if reason, ok := disabledTest[testName]; ok {
- t.Skip(fmt.Sprintf("Skipping %s test: %s", testName, reason))
+ t.Skipf("Skipping %s test: %s", testName, reason)
}
partitiontest.PartitionTest(t) // Check if this expect test should by run, may SKIP
diff --git a/test/framework/fixtures/kmdFixture.go b/test/framework/fixtures/kmdFixture.go
index 75a357f2a2..db4794d3d3 100644
--- a/test/framework/fixtures/kmdFixture.go
+++ b/test/framework/fixtures/kmdFixture.go
@@ -17,7 +17,6 @@
package fixtures
import (
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -133,14 +132,14 @@ func (f *KMDFixture) SetupWithConfig(t TestingTB, config string) {
// Write a token
f.APIToken = defaultAPIToken
tokenFilepath := filepath.Join(f.kmdDir, "kmd.token")
- err := ioutil.WriteFile(tokenFilepath, f.APIToken, 0640)
+ err := os.WriteFile(tokenFilepath, f.APIToken, 0640)
require.NoError(f.t, err)
if config == "" {
config = defaultConfig
}
configFilepath := filepath.Join(f.kmdDir, "kmd_config.json")
- err = ioutil.WriteFile(configFilepath, []byte(config), 0640)
+ err = os.WriteFile(configFilepath, []byte(config), 0640)
require.NoError(f.t, err)
// Start kmd
@@ -197,7 +196,7 @@ func (f *KMDFixture) MakeWalletAndHandleToken() (handleToken string, err error)
func (f *KMDFixture) TestConfig(cfg []byte) error {
// Write the passed config
configFilepath := filepath.Join(f.kmdDir, "kmd_config.json")
- err := ioutil.WriteFile(configFilepath, cfg, 0640)
+ err := os.WriteFile(configFilepath, cfg, 0640)
if err != nil {
return err
}
diff --git a/test/framework/fixtures/libgoalFixture.go b/test/framework/fixtures/libgoalFixture.go
index af84e4d2e1..746a0c2f94 100644
--- a/test/framework/fixtures/libgoalFixture.go
+++ b/test/framework/fixtures/libgoalFixture.go
@@ -17,8 +17,8 @@
package fixtures
import (
+ "bufio"
"fmt"
- "io/ioutil"
"os"
"os/exec"
"path/filepath"
@@ -135,7 +135,7 @@ func (f *LibGoalFixture) importRootKeys(lg *libgoal.Client, dataDir string) {
}
keyDir := filepath.Join(dataDir, genID)
- files, err := ioutil.ReadDir(keyDir)
+ files, err := os.ReadDir(keyDir)
if err != nil {
return
}
@@ -311,6 +311,10 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
f.NC.StopKMD()
if preserveData {
f.network.Stop(f.binDir)
+ f.dumpLogs(filepath.Join(f.PrimaryDataDir(), "node.log"))
+ for _, nodeDir := range f.NodeDataDirs() {
+ f.dumpLogs(filepath.Join(nodeDir, "node.log"))
+ }
} else {
f.network.Delete(f.binDir)
@@ -324,6 +328,24 @@ func (f *LibGoalFixture) ShutdownImpl(preserveData bool) {
}
}
+// dumpLogs prints out log files for the running nodes
+func (f *LibGoalFixture) dumpLogs(filePath string) {
+ file, err := os.Open(filePath)
+ if err != nil {
+ f.t.Logf("could not open %s", filePath)
+ return
+ }
+ defer file.Close()
+
+ f.t.Log("=================================\n")
+ parts := strings.Split(filePath, "/")
+ f.t.Logf("%s/%s:", parts[len(parts)-2], parts[len(parts)-1]) // Primary/node.log
+ scanner := bufio.NewScanner(file)
+ for scanner.Scan() {
+		f.t.Log(scanner.Text())
+ }
+}
+
// intercept baseFixture.failOnError so we can clean up any algods that are still alive
func (f *LibGoalFixture) failOnError(err error, message string) {
if err != nil {
diff --git a/test/framework/fixtures/restClientFixture.go b/test/framework/fixtures/restClientFixture.go
index 7265c560cb..c9f3befa84 100644
--- a/test/framework/fixtures/restClientFixture.go
+++ b/test/framework/fixtures/restClientFixture.go
@@ -18,6 +18,7 @@ package fixtures
import (
"fmt"
+ "github.com/algorand/go-algorand/data/basics"
"sort"
"time"
"unicode"
@@ -25,7 +26,7 @@ import (
"github.com/stretchr/testify/require"
"github.com/algorand/go-algorand/daemon/algod/api/client"
- "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
+ v1 "github.com/algorand/go-algorand/daemon/algod/api/spec/v1"
"github.com/algorand/go-algorand/libgoal"
"github.com/algorand/go-algorand/nodecontrol"
"github.com/algorand/go-algorand/test/e2e-go/globals"
@@ -265,12 +266,56 @@ func (f *RestClientFixture) WaitForAllTxnsToConfirm(roundTimeout uint64, txidsAn
for txid, addr := range txidsAndAddresses {
_, err := f.WaitForConfirmedTxn(roundTimeout, addr, txid)
if err != nil {
+ f.t.Logf("txn failed to confirm: ", addr, txid)
+ pendingTxns, err := f.AlgodClient.GetPendingTransactions(0)
+ if err == nil {
+ pendingTxids := make([]string, 0, pendingTxns.TotalTxns)
+ for _, txn := range pendingTxns.TruncatedTxns.Transactions {
+ pendingTxids = append(pendingTxids, txn.TxID)
+ }
+ f.t.Logf("pending txids: ", pendingTxids)
+ } else {
+ f.t.Logf("unable to log pending txns, ", err)
+ }
+ allTxids := make([]string, 0, len(txidsAndAddresses))
+ for txID := range txidsAndAddresses {
+ allTxids = append(allTxids, txID)
+ }
+ f.t.Logf("all txids: ", allTxids)
return false
}
}
return true
}
+// WaitForAccountFunded waits until the given account has a non-zero balance,
+// until the given roundTimeout round is reached,
+// or until waiting for a single round to pass times out
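+//
+// A minimal usage sketch (fixture wiring and the address are hypothetical):
+//
+//	err := fixture.WaitForAccountFunded(100, "HYPOTHETICALACCOUNTADDRESS")
+//	require.NoError(t, err, "account should be funded by round 100")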
+func (f *RestClientFixture) WaitForAccountFunded(roundTimeout uint64, accountAddress string) (err error) {
+ client := f.AlgodClient
+ for {
+ // Get current round information
+ curStatus, statusErr := client.Status()
+ require.NoError(f.t, statusErr, "fixture should be able to get node status")
+ curRound := curStatus.LastRound
+
+ // Check if we know about the transaction yet
+ acct, acctErr := client.AccountInformation(accountAddress)
+ require.NoError(f.t, acctErr, "fixture should be able to get account info")
+ if acct.Amount > 0 {
+ return nil
+ }
+
+ // Check if we should wait a round
+ if curRound > roundTimeout {
+ return fmt.Errorf("failed to see confirmed transaction by round %v", roundTimeout)
+ }
+ // Wait a round
+ err = f.WaitForRoundWithTimeout(curRound + 1)
+ require.NoError(f.t, err, "fixture should be able to wait for one round to pass")
+ }
+}
+
// SendMoneyAndWait uses the rest client to send money and WaitForTxnConfirmation to wait for the send to confirm
// it adds some extra error checking as well
func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
@@ -284,7 +329,8 @@ func (f *RestClientFixture) SendMoneyAndWait(curRound, amountToSend, transaction
// SendMoneyAndWaitFromWallet is as above, but for a specific wallet
func (f *RestClientFixture) SendMoneyAndWaitFromWallet(walletHandle, walletPassword []byte, curRound, amountToSend, transactionFee uint64, fromAccount, toAccount string, closeToAccount string) (txn v1.Transaction) {
client := f.LibGoalClient
- fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, 0, 0)
+	// use curRound - 1 as the first valid round in case other nodes are behind
+ fundingTx, err := client.SendPaymentFromWallet(walletHandle, walletPassword, fromAccount, toAccount, transactionFee, amountToSend, nil, closeToAccount, basics.Round(curRound).SubSaturate(1), 0)
require.NoError(f.t, err, "client should be able to send money from rich to poor account")
require.NotEmpty(f.t, fundingTx.ID().String(), "transaction ID should not be empty")
waitingDeadline := curRound + uint64(5)
diff --git a/test/heapwatch/README.md b/test/heapwatch/README.md
index 27cb54d316..04c0568be6 100644
--- a/test/heapwatch/README.md
+++ b/test/heapwatch/README.md
@@ -1,14 +1,54 @@
# Heap Watch
-Tools for checking if algod has memory leaks.
+Collect RAM, bandwidth, and other stats over the course of a test cluster run.
-Run a local private network of three nodes and two pingpongs.
+Produce reports and plots from the collected data.
-Periodically sample pprof memory profiles.
+## Scripts
-Watch memory usage from `ps` and write to a CSV file for each algod.
+* heapWatch.py
+ * collect data from algod
+  * heap profiles, /metrics, CPU profiles, block headers, goroutine profiles
+  * capture from a local algod by data dir, or from a cluster via a terraform-inventory.host file
+ * convert profiles to svg or other reports
-# Usage
+* block_history.py
+ * Capture block headers every round from a running `algod`
+
+* block_history_relays.py
+ * Capture block headers every round from one or more running `algod`
+ * Talk to a set of relays found in a terraform-inventory.host file.
+
+* block_history_plot.py
+ * Plot the output of test/heapwatch/{block_history.py,block_history_relays.py}
+
+* client_ram_report.py
+ * Process heap profiles (*.heap) collected from heapWatch.py
+ * Create a report on `algod` RAM usage
+
+* plot_crr_csv.py
+ * Plot the output of test/heapwatch/client_ram_report.py --csv
+
+* metrics_delta.py
+ * Process /metrics data captured by heapWatch.py
+ * Generate text report on bandwidth in and out of relays/PN/NPN
+ * optionally plot txn pool fullness
+
+* start.sh stop.sh
+ * Run a local private network of three nodes and two pingpongs.
+ * Periodically sample pprof memory profiles.
+ * Watch memory usage from `ps` and write to a CSV file for each algod.
+
+* bwstart.sh stop.sh
+  * Run a local private network of 3 relays and 8 leaf nodes
+ * Run 40 TPS of payment txns through it.
+ * Record metrics for bandwidth analysis.
+
+* runNodeHost.py nodeHostTarget.py
+  * Run a new EC2 host with NPN and PN algod on it, pointed at one relay (no DNS needed)
+
+
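+A minimal collect-then-plot sequence for block history, using the flags defined
+by the scripts above (the output file name is illustrative):
+
+    python3 test/heapwatch/block_history.py -d "$ALGORAND_DATA" -o blocks.b64msgpack
+    python3 test/heapwatch/block_history_plot.py blocks.b64msgpack
+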
+## heapWatch.py local cluster usage
To start:
diff --git a/test/heapwatch/block_history.py b/test/heapwatch/block_history.py
new file mode 100644
index 0000000000..29182e760a
--- /dev/null
+++ b/test/heapwatch/block_history.py
@@ -0,0 +1,258 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Capture block headers every round from a running `algod`
+#
+# pip install py-algorand-sdk
+
+import argparse
+import base64
+import logging
+import os
+import re
+import signal
+import sys
+import time
+
+import algosdk
+from algosdk.encoding import msgpack
+from algosdk.v2client.algod import AlgodClient
+
+logger = logging.getLogger(__name__)
+
+def addr_token_from_algod(algorand_data):
+ with open(os.path.join(algorand_data, 'algod.net')) as fin:
+ addr = fin.read().strip()
+ with open(os.path.join(algorand_data, 'algod.token')) as fin:
+ token = fin.read().strip()
+ if not addr.startswith('http'):
+ addr = 'http://' + addr
+ return addr, token
+
+def loads(blob):
+ return msgpack.loads(base64.b64decode(blob), strict_map_key=False)
+
+def dumps(blob):
+ return base64.b64encode(msgpack.dumps(blob))
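+
+# Round-trip sketch: each line written by Fetcher is dumps(block_dict), i.e. a
+# msgpack-encoded dict, then base64-encoded, one block per line, so
+# loads(line) recovers the original dict.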
+
+class Fetcher:
+ def __init__(self, algorand_data=None, token=None, addr=None, headers=None, prev_round=None, outpath=None):
+ """
+ algorand_data = path to algod data dir
+ addr, token = algod URI and access token
+ headers = dict of HTTP headers to send to algod
+ prev_round = start with (prev_round + 1)
+ outpath = path to append base64-msgpack-per-line data to
+ """
+ self.algorand_data = algorand_data
+ self.token = token
+ self.addr = addr
+ self.headers = headers
+ self._algod = None
+ self.go = True
+ self.prev_round = prev_round
+ self.block_time = None
+ self.outpath = outpath
+ self._outf = None
+ if outpath and ((prev_round is None) or (prev_round == -1)):
+ # load data, find last known round in data
+ try:
+ with open(outpath) as fin:
+ for line in fin:
+ if not line:
+ continue
+ line = line.strip()
+ if not line:
+ continue
+ if line[0] == '#':
+ continue
+ ob = loads(line)
+ rnd = ob['block'].get('rnd', 0)
+ if (self.prev_round is None) or (rnd > self.prev_round):
+ self.prev_round = rnd
+            except Exception:
+                pass  # best effort; fall back to the node's current round
+ return
+
+ def algod(self):
+ "return an open algosdk.v2client.algod.AlgodClient"
+ if self._algod is None:
+ if self.algorand_data:
+ addr, token = addr_token_from_algod(self.algorand_data)
+ logger.debug('algod from %r, (%s %s)', self.algorand_data, addr, token)
+ else:
+ token = self.token
+ addr = self.addr
+ logger.debug('algod from args (%s %s)', self.addr, self.token)
+ self._algod = AlgodClient(token, addr, headers=self.headers)
+ return self._algod
+
+ def outf(self):
+ if self._outf is None:
+ self._outf = open(self.outpath, 'ab')
+ return self._outf
+
+ def nextblock(self, lastround=None, retries=30):
+ trycount = 0
+ while (trycount < retries) and self.go:
+ trycount += 1
+ try:
+ return self._nextblock_inner(lastround)
+ except Exception as e:
+ if trycount >= retries:
+ logger.error('too many errors in nextblock retries')
+ raise
+ else:
+ logger.warning('error in nextblock(%r) (retrying): %s', lastround, e)
+ self._algod = None # retry with a new connection
+ time.sleep(1.2)
+ return None
+
+ def _nextblock_inner(self, lastround):
+ self.block_time = None
+ algod = self.algod()
+ if lastround is None:
+ status = algod.status()
+ lastround = status['last-round']
+ logger.debug('nextblock status last-round %s', lastround)
+ else:
+ try:
+ blk = self.algod().block_info(lastround + 1, response_format='msgpack')
+ if blk:
+ return blk
+ logger.warning('null block %d, lastround=%r', lastround+1, lastround)
+ except Exception as e:
+ pass
+ #logger.debug('could not get block %d: %s', lastround + 1, e, exc_info=True)
+ status = algod.status_after_block(lastround)
+ block_time = time.time() # the block has happened, don't count block data transit time
+ nbr = status['last-round']
+ retries = 30
+ while (nbr > lastround + 1) and self.go:
+ # if more than one block elapsed, we don't have a good time for either block
+ block_time = None
+ # try lastround+1 one last time
+ try:
+ blk = self.algod().block_info(lastround + 1, response_format='msgpack')
+ if blk:
+ return blk
+ logger.warning('null block %d, lastround=%r, status.last-round=%d', lastround+1, lastround, nbr)
+ time.sleep(1.1)
+ retries -= 1
+ if retries <= 0:
+ raise Exception("too many null block for %d", lastround+1)
+ except:
+ break
+ blk = self.algod().block_info(nbr, response_format='msgpack')
+ if blk:
+ self.block_time = block_time
+ return blk
+ raise Exception('got None for blk {}'.format(nbr))
+
+ def loop(self):
+ """Start processing blocks and txns
+ runs until error or bot.go=False
+ """
+ try:
+ self._loop_inner(self.prev_round)
+ finally:
+ self.close()
+
+ def _loop_inner(self, lastround):
+ while self.go:
+ b = self.nextblock(lastround)
+ if b is None:
+ print("got None nextblock. exiting")
+ return
+ b = msgpack.loads(b, strict_map_key=False)
+ nowround = b['block'].get('rnd', 0)
+ if (lastround is not None) and (nowround != lastround + 1):
+ logger.info('round jump %d to %d', lastround, nowround)
+ self._block_handler(b)
+ lastround = nowround
+
+ def _block_handler(self, b):
+ # throw away txns, count is kept in round differential ['block']['tc']
+ b['block'].pop('txns', [])
+ # throw away certs
+ b.pop('cert', None)
+ # Add fine grained time. This should be better than ['block']['ts']
+ b['_time'] = self.block_time or time.time()
+ self.outf().write(dumps(b) + b'\n')
+
+ def close(self):
+ self._algod = None
+
+def header_list_to_dict(hlist):
+ if not hlist:
+ return None
+ p = re.compile(r':\s+')
+ out = {}
+ for x in hlist:
+ a, b = p.split(x, 1)
+ out[a] = b
+ return out
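+
+# e.g. header_list_to_dict(['X-Algo-API-Token: abc']) returns {'X-Algo-API-Token': 'abc'}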
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('-d', '--algod', default=None, help='algod data dir')
+ ap.add_argument('-a', '--addr', default=None, help='algod host:port address')
+ ap.add_argument('-t', '--token', default=None, help='algod API access token')
+ ap.add_argument('--header', dest='headers', nargs='*', help='"Name: value" HTTP header (repeatable)')
+ ap.add_argument('--all', default=False, action='store_true', help='fetch all blocks from 0')
+ ap.add_argument('--verbose', default=False, action='store_true')
+    ap.add_argument('-o', '--out', default=None, help='file to append base64 msgpack lines to')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ algorand_data = args.algod or os.getenv('ALGORAND_DATA')
+ if not algorand_data and not (args.token and args.addr):
+        sys.stderr.write('must specify algod data dir by $ALGORAND_DATA or -d/--algod; OR -a/--addr and -t/--token\n')
+ sys.exit(1)
+
+ prev_round = None
+ if args.all:
+ prev_round = -1
+ bot = Fetcher(
+ algorand_data,
+ token=args.token,
+ addr=args.addr,
+ headers=header_list_to_dict(args.headers),
+ outpath=args.out,
+ prev_round=prev_round,
+ )
+
+ def do_graceful_stop(signum, frame):
+        if not bot.go:
+ sys.stderr.write("second signal, quitting\n")
+ sys.exit(1)
+ sys.stderr.write("graceful stop...\n")
+ bot.go = False
+ signal.signal(signal.SIGTERM, do_graceful_stop)
+ signal.signal(signal.SIGINT, do_graceful_stop)
+
+ bot.loop()
+
+if __name__ == '__main__':
+ main()
diff --git a/test/heapwatch/block_history_plot.py b/test/heapwatch/block_history_plot.py
new file mode 100644
index 0000000000..174c1dca11
--- /dev/null
+++ b/test/heapwatch/block_history_plot.py
@@ -0,0 +1,162 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Plot the output of test/heapwatch/{block_history.py,block_history_relays.py}
+#
+# Histograms of round times, TPS, txn/block
+# Graph over time of TPS or 10-round-moving-average-TPS
+
+import base64
+import statistics
+
+from algosdk.encoding import msgpack
+from matplotlib import pyplot as plt
+
+def process(path, args):
+ prevtime = None
+ prevtc = 0
+ prevts = None
+ prevrnd = None
+ mintxn = 9999999
+ maxtxn = 0
+ mindt = 999999
+ maxdt = 0
+ mintps = 999999
+ maxtps = 0
+ tcv = []
+ tsv = []
+ tpsv = []
+ dtv = []
+ txnv = []
+ count = 0
+ with open(path, 'rb') as fin:
+ for line in fin:
+ line = line.strip()
+ row = msgpack.loads(base64.b64decode(line), strict_map_key=False)
+ count += 1
+ block = row['block']
+ rnd = block.get('rnd',0)
+ tc = block.get('tc', 0)
+ ts = block.get('ts', 0) # timestamp recorded at algod, 1s resolution int
+ _time = row['_time'] # timestamp recorded at client, 0.000001s resolution float
+ tcv.append(tc)
+ if prevtime is not None:
+ dt = _time - prevtime
+ if dt < 1:
+ dt = ts - prevts
+ tsv.append(ts)
+ else:
+ if _time < tsv[-1]:
+ tsv.append(ts)
+ else:
+ tsv.append(_time)
+ dtxn = tc - prevtc
+ tps = dtxn / dt
+ mintxn = min(dtxn,mintxn)
+ maxtxn = max(dtxn,maxtxn)
+ mindt = min(dt,mindt)
+ maxdt = max(dt,maxdt)
+ mintps = min(tps,mintps)
+ maxtps = max(tps,maxtps)
+ tpsv.append(tps)
+ dtv.append(dt)
+ txnv.append(dtxn)
+ else:
+ tsv.append(ts)
+ prevrnd = rnd
+ prevtc = tc
+ prevts = ts
+ prevtime = _time
+ print('{} blocks, block txns [{}-{}], block seconds [{}-{}], tps [{}-{}]'.format(
+ count,
+ mintxn,maxtxn,
+ mindt,maxdt,
+ mintps,maxtps,
+ ))
+
+ start = args.start
+ end = len(txnv)-1
+ if not args.all:
+ # find the real start of the test
+ start += 1
+ for i in range(len(txnv)):
+ if len(list(filter(lambda x: x > 100, txnv[i:i+5]))) == 5:
+ start = i + 5
+ break
+ txmean = statistics.mean(txnv[start:])
+ txstd = statistics.stdev(txnv[start:])
+ end = len(txnv)
+ for i in range(start,len(txnv)):
+ if len(list(filter(lambda x: x > txmean-(txstd*2), txnv[i:i+5]))) < 4:
+ print(i)
+ end = i
+ break
+
+ print('core test rounds [{}:{}]'.format(start,end))
+ print('block txns [{}-{}], block seconds [{}-{}], tps [{}-{}]'.format(
+ min(txnv[start:end]), max(txnv[start:end]),
+ min(dtv[start:end]), max(dtv[start:end]),
+ min(tpsv[start:end]), max(tpsv[start:end]),
+ ))
+ print('long round times: {}'.format(' '.join(list(map(str,filter(lambda x: x >= 9,dtv[start:end]))))))
+ fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2,2)
+ ax1.set_title('round time (seconds)')
+ ax1.hist(list(filter(lambda x: x < 9,dtv[start:end])),bins=20)
+
+ ax2.set_title('TPS')
+ ax2.hist(tpsv[start:end],bins=20)
+
+ ax3.set_title('txn/block')
+ ax3.hist(txnv[start:end],bins=20)
+
+ # 10 round moving average TPS
+ tpsv10 = []
+ for i in range(10,len(tsv)):
+ ts0 = tsv[i-10]
+ tsa = tsv[i]
+ tc0 = tcv[i-10]
+ tca = tcv[i]
+ dt = tsa-ts0
+ dtxn = tca-tc0
+ tpsv10.append(dtxn/dt)
+ if args.tps1:
+ ax4.set_title('TPS')
+ ax4.plot(tpsv[start:end])
+ print('fullish block sizes: {}'.format(list(filter(lambda x: x > 100, txnv))))
+ else:
+ ax4.set_title('TPS(10 round window)')
+ ax4.plot(tpsv10)
+ fig.tight_layout()
+ plt.savefig(path + '_hist.svg', format='svg')
+ plt.savefig(path + '_hist.png', format='png')
+
+def main():
+ import argparse
+ ap = argparse.ArgumentParser()
+ ap.add_argument('files', nargs='+')
+ ap.add_argument('--all', default=False, action='store_true')
+ ap.add_argument('--tps1', default=False, action='store_true')
+ ap.add_argument('--start', default=0, type=int, help='start round')
+ args = ap.parse_args()
+
+ for fname in args.files:
+ process(fname, args)
+
+if __name__ == '__main__':
+ main()
diff --git a/test/heapwatch/block_history_relays.py b/test/heapwatch/block_history_relays.py
new file mode 100644
index 0000000000..5d3c7b0c75
--- /dev/null
+++ b/test/heapwatch/block_history_relays.py
@@ -0,0 +1,109 @@
+#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Capture block headers every round from a running `algod`
+# Talk to a set of relays found in a terraform-inventory.host file.
+#
+# pip install py-algorand-sdk
+
+
+import argparse
+import atexit
+import configparser
+import logging
+import os
+import re
+import signal
+import sys
+import threading
+
+import block_history
+
+logger = logging.getLogger(__name__)
+
+graceful_stop = False
+fetchers = []
+
+def do_graceful_stop(signum, frame):
+ global fetchers
+ global graceful_stop
+ if graceful_stop:
+ sys.stderr.write("second signal, quitting\n")
+ sys.exit(1)
+ sys.stderr.write("graceful stop...\n")
+ graceful_stop = True
+ for fet in fetchers:
+ fet.go = False
+
+relay_pat = re.compile(r'name_r\d+')
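+# matches terraform-inventory section names like [name_r1] or [name_r12]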
+
+def main():
+ ap = argparse.ArgumentParser()
+ ap.add_argument('--tf-inventory', default='terraform-inventory.host', help='terraform inventory file to use if no data_dirs specified')
+ ap.add_argument('--all', default=False, action='store_true')
+ ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
+    ap.add_argument('--pid', help='file to write this process pid to')
+ ap.add_argument('--token', default='', help='default algod api token to use')
+ ap.add_argument('--outdir', required=True)
+ ap.add_argument('--all-rounds', default=False, action='store_true', help='fetch all blocks from 0')
+ ap.add_argument('--verbose', default=False, action='store_true')
+ args = ap.parse_args()
+
+ if args.verbose:
+ logging.basicConfig(level=logging.DEBUG)
+ else:
+ logging.basicConfig(level=logging.INFO)
+
+ if args.pid:
+ with open(args.pid, 'w') as fout:
+ fout.write('{}'.format(os.getpid()))
+ atexit.register(os.remove, args.pid)
+ prev_round = None
+ if args.all_rounds:
+ prev_round = -1
+ signal.signal(signal.SIGTERM, do_graceful_stop)
+ signal.signal(signal.SIGINT, do_graceful_stop)
+
+ threads = []
+ cp = configparser.ConfigParser(allow_no_value=True)
+ cp.read(args.tf_inventory)
+ for k,v in cp.items():
+ if not relay_pat.match(k):
+ continue
+ if args.all:
+ pass
+ elif k.endswith('1'):
+ pass
+ else:
+ continue
+ for net in v.keys():
+ addr = 'http://' + net + ':' + args.port
+ outpath = os.path.join(args.outdir, k + '_' + net + '.blockhistory')
+ fet = block_history.Fetcher(addr=addr, token=args.token, outpath=outpath, prev_round=prev_round)
+ t = threading.Thread(target=fet.loop)
+ logger.debug('starting %s -> %s', addr, outpath)
+ t.start()
+ threads.append(t)
+ fetchers.append(fet)
+ for t in threads:
+ t.join()
+ logger.debug('block_history_relays.py done')
+
+if __name__ == '__main__':
+ main()
diff --git a/test/heapwatch/client_ram_report.py b/test/heapwatch/client_ram_report.py
index 5ac0f2dd25..04f212f18a 100644
--- a/test/heapwatch/client_ram_report.py
+++ b/test/heapwatch/client_ram_report.py
@@ -1,6 +1,27 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Process heap profiles (*.heap) collected from heapWatch.py
+# Create a report on `algod` RAM usage
import argparse
+import configparser
import csv
import glob
import json
@@ -83,6 +104,43 @@ def get_heap_inuse_totals(dirpath):
return cached
+def maybe_load_tf_nicks(args):
+ tf_inventory_path = os.path.join(args.dir, 'terraform-inventory.host')
+ if not os.path.exists(tf_inventory_path):
+ return None
+ tf_inventory = configparser.ConfigParser(allow_no_value=True)
+ tf_inventory.read(tf_inventory_path)
+ ip_to_name = {}
+ for k, sub in tf_inventory.items():
+ if k.startswith('name_'):
+ nick = k[5:]
+ for ip in sub:
+ if ip in ip_to_name:
+ logger.warning('ip %r already named %r, also got %r', ip, ip_to_name[ip], k)
+ ip_to_name[ip] = nick
+ return ip_to_name
+
+
+def hostports_to_nicks(args, hostports):
+ ip_to_nick = maybe_load_tf_nicks(args)
+ if not ip_to_nick:
+ return hostports
+ out = []
+ for hp in hostports:
+ hit = None
+ for ip, nick in ip_to_nick.items():
+ if ip in hp:
+ if hit is None:
+ hit = nick
+ else:
+ logger.warning('nick collision in ip=%r, hit=%r nick=%r', ip, hit, nick)
+ hit = nick
+ if not hit:
+ hit = hp
+ out.append(hit)
+ return out
+
+
def main():
ap = argparse.ArgumentParser()
ap.add_argument('-d', '--dir', required=True, help='dir path to find /*.metrics in')
@@ -109,7 +167,7 @@ def main():
whens.add(ts)
whens = sorted(whens)
nodes = sorted(heap_totals.keys())
- writer.writerow(['when','dt','round'] + nodes)
+ writer.writerow(['when','dt','round'] + hostports_to_nicks(args, nodes))
first = None
for ts in whens:
tv = time.mktime(time.strptime(ts, '%Y%m%d_%H%M%S'))
diff --git a/test/heapwatch/heapWatch.py b/test/heapwatch/heapWatch.py
index 43e51618e4..aced214f0b 100644
--- a/test/heapwatch/heapWatch.py
+++ b/test/heapwatch/heapWatch.py
@@ -1,6 +1,24 @@
#!/usr/bin/python3
#
-# repeatedly snapshot heap profiles for one or more algod
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# repeatedly snapshot metrics & profiles for one or more algod
#
# usage:
# mkdir -p /tmp/heaps
@@ -12,12 +30,15 @@
import fnmatch
import json
import logging
+import math
import os
+import queue
import re
import signal
import shutil
import subprocess
import sys
+import threading
import time
import urllib.request
@@ -79,9 +100,17 @@ def jsonable(ob):
return {jsonable(k):jsonable(v) for k,v in ob.items()}
return ob
+def nmax(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
class algodDir:
def __init__(self, path, net=None, token=None, admin_token=None):
self.path = path
+ self.isdir = os.path.isdir(path)
self.nick = os.path.basename(self.path)
if net is None:
net, token, admin_token = read_algod_dir(self.path)
@@ -91,9 +120,12 @@ def __init__(self, path, net=None, token=None, admin_token=None):
self.headers = {}
self._pid = None
self._algod = None
+ self.timeout = 15
def pid(self):
if self._pid is None:
+ if not self.isdir:
+ return None
with open(os.path.join(self.path, 'algod.pid')) as fin:
self._pid = int(fin.read())
return self._pid
@@ -106,11 +138,17 @@ def algod(self):
self._algod = algosdk.v2client.algod.AlgodClient(self.token, net, self.headers)
return self._algod
- def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None):
+ def get_pprof_snapshot(self, name, snapshot_name=None, outdir=None, timeout=None):
+ if timeout is None:
+ timeout = self.timeout
url = 'http://' + self.net + '/urlAuth/' + self.admin_token + '/debug/pprof/' + name
- response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ try:
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers), timeout=timeout)
+ except Exception as e:
+ logger.error('could not fetch %s from %s via %r (%s)', name, self.path, url, e)
+ return
if response.code != 200:
- logger.error('could not fetch %s from %s via %r', name, self.path. url)
+ logger.error('could not fetch %s from %s via %r (%r)', name, self.path, url, response.code)
return
blob = response.read()
if snapshot_name is None:
@@ -127,10 +165,16 @@ def get_heap_snapshot(self, snapshot_name=None, outdir=None):
def get_goroutine_snapshot(self, snapshot_name=None, outdir=None):
return self.get_pprof_snapshot('goroutine', snapshot_name, outdir)
- def get_metrics(self, snapshot_name=None, outdir=None):
+ def get_cpu_profile(self, snapshot_name=None, outdir=None, seconds=90):
+ seconds = int(seconds)
+ return self.get_pprof_snapshot('profile?seconds={}'.format(seconds), snapshot_name, outdir, timeout=seconds+20)
+
+ def get_metrics(self, snapshot_name=None, outdir=None, timeout=None):
url = 'http://' + self.net + '/metrics'
+ if timeout is None:
+ timeout = self.timeout
try:
- response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers))
+ response = urllib.request.urlopen(urllib.request.Request(url, headers=self.headers), timeout=timeout)
if response.code != 200:
logger.error('could not fetch %s from %s via %r', snapshot_name, self.path, url)
return
@@ -143,6 +187,11 @@ def get_metrics(self, snapshot_name=None, outdir=None):
fout.write(blob)
logger.debug('%s -> %s', self.nick, outpath)
+ def go_metrics(self, snapshot_name=None, outdir=None):
+ t = threading.Thread(target=self.get_metrics, args=(snapshot_name, outdir))
+ t.start()
+ return t
+
def get_blockinfo(self, snapshot_name=None, outdir=None):
try:
algod = self.algod()
@@ -160,9 +209,20 @@ def get_blockinfo(self, snapshot_name=None, outdir=None):
with open(outpath, 'wt') as fout:
json.dump(jsonable(bi), fout)
return bi
- #txncount = bi['block']['tc']
+
+ def _get_blockinfo_q(self, snapshot_name=None, outdir=None, biqueue=None):
+ bi = self.get_blockinfo(snapshot_name, outdir)
+ if biqueue and bi:
+ biqueue.put(bi)
+
+ def go_blockinfo(self, snapshot_name=None, outdir=None, biqueue=None):
+ t = threading.Thread(target=self._get_blockinfo_q, args=(snapshot_name, outdir, biqueue))
+ t.start()
+ return t
def psHeap(self):
+ if not self.isdir:
+ return None, None
# return rss, vsz (in kilobytes)
# ps -o rss,vsz $(cat ${ALGORAND_DATA}/algod.pid)
subp = subprocess.Popen(['ps', '-o', 'rss,vsz', str(self.pid())], stdout=subprocess.PIPE)
@@ -177,12 +237,33 @@ def psHeap(self):
except:
return None, None
+class maxrnd:
+ def __init__(self, biqueue):
+ self.biqueue = biqueue
+ self.maxrnd = None
+
+ def _run(self):
+ while True:
+ bi = self.biqueue.get()
+ if 'block' not in bi:
+ return
+ rnd = bi['block'].get('rnd',0)
+ if (self.maxrnd is None) or (rnd > self.maxrnd):
+ self.maxrnd = rnd
+ def start(self):
+ t = threading.Thread(target=self._run)
+ t.start()
+ return t
+
class watcher:
def __init__(self, args):
self.args = args
self.prevsnapshots = {}
self.they = []
self.netseen = set()
+ self.latest_round = None
+ self.bi_hosts = []
+ self.netToAd = {}
os.makedirs(self.args.out, exist_ok=True)
if not args.data_dirs and os.path.exists(args.tf_inventory):
cp = configparser.ConfigParser(allow_no_value=True)
@@ -190,6 +271,8 @@ def __init__(self, args):
shutil.copy2(args.tf_inventory, self.args.out)
for role in args.tf_roles.split(','):
role_name = 'role_' + role
+ if role_name not in cp:
+ continue
for net in cp[role_name].keys():
logger.debug('addnet role %s %s', role, net)
self._addnet(net)
@@ -201,6 +284,19 @@ def __init__(self, args):
for net in v.keys():
logger.debug('addnet re %s %s', nre, net)
self._addnet(net)
+ if args.tf_bi_re:
+ namere = re.compile(args.tf_bi_re)
+ for k,v in cp.items():
+ if not namere.match(k):
+ continue
+ for net in v.keys():
+                    logger.debug('bi net %s %s', k, net)
+ ad = self.netToAd.get(net)
+ if not ad:
+ self._addnet(net)
+ ad = self.netToAd.get(net)
+ if ad:
+ self.bi_hosts.append(ad)
for path in args.data_dirs:
if not os.path.isdir(path):
continue
@@ -222,20 +318,29 @@ def _addnet(self, net):
try:
ad = algodDir(net, net=net, token=self.args.token, admin_token=self.args.admin_token)
self.they.append(ad)
+ self.netToAd[net] = ad
except:
logger.error('bad algod: %r', net, exc_info=True)
- def do_snap(self, now):
+ def do_snap(self, now, get_cpu=False, fraction=False):
snapshot_name = time.strftime('%Y%m%d_%H%M%S', time.gmtime(now))
snapshot_isotime = time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(now))
+ if fraction:
+ sf = now - math.floor(now)
+ sfs = '{:.6f}'.format(sf)
+ if sfs[0] == '0':
+ sfs = sfs[1:]
+ snapshot_name += sfs
+ snapshot_isotime += sfs
logger.debug('begin snapshot %s', snapshot_name)
psheaps = {}
newsnapshots = {}
if self.args.heaps:
for ad in self.they:
snappath = ad.get_heap_snapshot(snapshot_name, outdir=self.args.out)
- newsnapshots[ad.path] = snappath
+ if snappath:
+ newsnapshots[ad.path] = snappath
rss, vsz = ad.psHeap()
if rss and vsz:
psheaps[ad.nick] = (rss, vsz)
@@ -247,11 +352,35 @@ def do_snap(self, now):
for ad in self.they:
ad.get_goroutine_snapshot(snapshot_name, outdir=self.args.out)
if self.args.metrics:
+ threads = []
for ad in self.they:
- ad.get_metrics(snapshot_name, outdir=self.args.out)
+ threads.append(ad.go_metrics(snapshot_name, outdir=self.args.out))
+ for t in threads:
+ t.join()
+ logger.debug('metrics done')
if self.args.blockinfo:
+ threads = []
+ biq = queue.SimpleQueue()
+ mr = maxrnd(biq)
+ mrt = mr.start()
+ bi_hosts = self.bi_hosts or self.they
+ for ad in bi_hosts:
+ threads.append(ad.go_blockinfo(snapshot_name, outdir=self.args.out, biqueue=biq))
+ for t in threads:
+ t.join()
+ biq.put({})
+ mrt.join()
+ self.latest_round = mr.maxrnd
+ logger.debug('blockinfo done')
+ if get_cpu:
+ cpuSample = durationToSeconds(self.args.cpu_sample) or 90
+ threads = []
for ad in self.they:
- ad.get_blockinfo(snapshot_name, outdir=self.args.out)
+ t = threading.Thread(target=ad.get_cpu_profile, kwargs={'snapshot_name':snapshot_name, 'outdir':self.args.out, 'seconds': cpuSample})
+ t.start()
+ threads.append(t)
+ for t in threads:
+ t.join()
if self.args.svg:
logger.debug('snapped, processing pprof...')
# make absolute and differential plots
@@ -263,6 +392,24 @@ def do_snap(self, now):
subprocess.call(['go', 'tool', 'pprof', '-sample_index=inuse_space', '-svg', '-output', snappath + '.inuse_diff.svg', '-base='+prev, snappath])
subprocess.call(['go', 'tool', 'pprof', '-sample_index=alloc_space', '-svg', '-output', snappath + '.alloc_diff.svg', '-diff_base='+prev, snappath])
self.prevsnapshots = newsnapshots
+ logger.debug('end snapshot %s', snapshot_name)
+
+def durationToSeconds(rts):
+ if rts is None:
+ return None
+ rts = rts.lower()
+ if rts.endswith('h'):
+ mult = 3600
+ rts = rts[:-1]
+ elif rts.endswith('m'):
+ mult = 60
+ rts = rts[:-1]
+ elif rts.endswith('s'):
+ mult = 1
+ rts = rts[:-1]
+ else:
+ mult = 1
+ return float(rts) * mult
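+
+# e.g. durationToSeconds('90s') == 90.0, durationToSeconds('5m') == 300.0,
+# durationToSeconds('2h') == 7200.0; a bare '45' is taken as seconds (45.0)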
def main():
ap = argparse.ArgumentParser()
@@ -273,14 +420,18 @@ def main():
ap.add_argument('--blockinfo', default=False, action='store_true', help='also capture block header info')
ap.add_argument('--period', default=None, help='seconds between automatically capturing')
ap.add_argument('--runtime', default=None, help='(\d+)[hm]? time in hour/minute (default second) to gather info then exit')
+ ap.add_argument('--rounds', default=None, type=int, help='number of rounds to run')
ap.add_argument('--tf-inventory', default='terraform-inventory.host', help='terraform inventory file to use if no data_dirs specified')
ap.add_argument('--token', default='', help='default algod api token to use')
ap.add_argument('--admin-token', default='', help='default algod admin-api token to use')
ap.add_argument('--tf-roles', default='relay', help='comma separated list of terraform roles to follow')
ap.add_argument('--tf-name-re', action='append', default=[], help='regexp to match terraform node names, may be repeated')
+ ap.add_argument('--tf-bi-re', help='hosts to get blocks from')
ap.add_argument('--svg', dest='svg', default=False, action='store_true', help='automatically run `go tool pprof` to generate performance profile svg from collected data')
ap.add_argument('-p', '--port', default='8580', help='algod port on each host in terraform-inventory')
ap.add_argument('-o', '--out', default=None, help='directory to write to')
+    ap.add_argument('--cpu-after', help='capture cpu profile after some time from start (e.g. 5m)')
+ ap.add_argument('--cpu-sample', help='capture cpu profile for some time (e.g. 90s)')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -305,45 +456,49 @@ def main():
app.do_snap(now)
endtime = None
+ end_round = None
+ if (app.latest_round is not None) and (args.rounds is not None):
+ end_round = app.latest_round + args.rounds
if args.runtime:
- rts = args.runtime
- if rts.endswith('h'):
- mult = 3600
- rts = rts[:-1]
- elif rts.endswith('m'):
- mult = 60
- rts = rts[:-1]
- else:
- mult = 1
- endtime = (float(rts) * mult) + start
+ endtime = durationToSeconds(args.runtime) + start
+ logger.debug('now %.1f; endtime %.1f', start, endtime)
+
+ cpuAfter = durationToSeconds(args.cpu_after)
+ if cpuAfter is not None:
+ cpuAfter += start
+
if args.period:
- lastc = args.period.lower()[-1:]
- if lastc == 's':
- periodSecs = int(args.period[:-1])
- elif lastc == 'm':
- periodSecs = int(args.period[:-1]) * 60
- elif lastc == 'h':
- periodSecs = int(args.period[:-1]) * 3600
- else:
- periodSecs = int(args.period)
+ periodSecs = durationToSeconds(args.period)
+ snap_fraction = periodSecs < 1.0
periodi = 1
nextt = start + (periodi * periodSecs)
while not graceful_stop:
+ logger.debug('nextt %f now %f', nextt, now)
while nextt < now:
nextt = start + (periodi * periodSecs)
+ periodi += 1
while now < nextt - (periodSecs * 0.05):
logger.debug('sleep %f', nextt - now)
time.sleep(nextt - now)
if graceful_stop:
- return
+ return 0
now = time.time()
periodi += 1
nextt += periodSecs
- app.do_snap(now)
+ get_cpu = False
+ if (cpuAfter is not None) and (now > cpuAfter):
+ get_cpu = True
+ cpuAfter = None
+ app.do_snap(now, get_cpu, fraction=snap_fraction)
+ now = time.time()
if (endtime is not None) and (now > endtime):
- return
+ logger.debug('after endtime, done')
+ return 0
+ if (end_round is not None) and (app.latest_round is not None) and (app.latest_round >= end_round):
+ logger.debug('after end round %d > %d', app.latest_round, end_round)
+ return 0
return 0
if __name__ == '__main__':
diff --git a/test/heapwatch/metrics_delta.py b/test/heapwatch/metrics_delta.py
index b6aa2ae8a4..70324c3c7c 100644
--- a/test/heapwatch/metrics_delta.py
+++ b/test/heapwatch/metrics_delta.py
@@ -1,4 +1,25 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
+#
+# Process /metrics data captured by heapWatch.py
+#
+# Generate text report on bandwidth in and out of relays/PN/NPN
import argparse
import configparser
@@ -143,18 +164,32 @@ def __init__(self, label=None):
self.tpsMeanSum = 0
self.txBpsMeanSum = 0
self.rxBpsMeanSum = 0
+ self.tpsSum = 0
+ self.blockTimeSum = 0
self.sumsCount = 0
self.nodes = {}
+ self.biByTime = {}
+ self.verifyMillis = []
def __call__(self, ttr, nick):
if not ttr:
+ logger.debug('no summary for %s', nick)
return
self.nodes[nick] = ttr
logger.debug('%d points from %s', len(ttr.tpsList), nick)
self.tpsMeanSum += meanOrZero(ttr.tpsList)
self.txBpsMeanSum += meanOrZero(ttr.txBpsList)
self.rxBpsMeanSum += meanOrZero(ttr.rxBpsList)
+ self.tpsSum += ttr.tps
+ self.blockTimeSum += ttr.blockTime
self.sumsCount += 1
+ if ttr.biByTime:
+ self.biByTime.update(ttr.biByTime)
+ if ttr.verifyMillis:
+ self.verifyMillis.append(ttr.verifyMillis)
+
+ def blockinfo(self, curtime):
+ return self.biByTime.get(curtime)
def byMsg(self):
txPSums = {}
@@ -209,14 +244,42 @@ def txPool(self):
def __str__(self):
if not self.sumsCount:
tps, txbps, rxbps = math.nan, math.nan, math.nan
+ blockTimes = math.nan
else:
- tps = self.tpsMeanSum/self.sumsCount
+ #tps = self.tpsMeanSum/self.sumsCount
+ tps = self.tpsSum/self.sumsCount
+ blockTimes = self.blockTimeSum/self.sumsCount
txbps = self.txBpsMeanSum/self.sumsCount
rxbps = self.rxBpsMeanSum/self.sumsCount
labelspace = ""
if self.label:
labelspace = self.label + " "
- return '{byMsg}\n{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps))
+ if self.verifyMillis:
+ verifyMillis = labelspace + 'verify ms ({:.0f}/{:.0f}/{:.0f})\n'.format(min(self.verifyMillis), meanOrZero(self.verifyMillis), max(self.verifyMillis))
+ else:
+ verifyMillis = ''
+ return '{byMsg}\n{verifyMillis}{labelspace}{txPool}\n{labelspace}summary: {TPS:0.2f} TPS, {bt:1.2f}s/block, tx {txBps}B/s, rx {rxBps}B/s'.format(labelspace=labelspace, byMsg=self.byMsg(), txPool=self.txPool(), TPS=tps, txBps=hunum(txbps), rxBps=hunum(rxbps), bt=blockTimes, verifyMillis=verifyMillis)
+
+ def plot_pool(self, outpath):
+ from matplotlib import pyplot as plt
+        found = False
+ for nick, ns in self.nodes.items():
+ if not ns.txPool:
+ continue
+            found = True
+ plt.plot(ns.times, ns.txPool, label=nick)
+ csvoutpath = outpath + nick + '.csv'
+ with open(csvoutpath, 'w') as fout:
+ writer = csv.writer(fout)
+ writer.writerow(['time', 'pool'])
+ for t, p in zip(ns.times, ns.txPool):
+ writer.writerow([t,p])
+        if not found:
+ logger.error('no txPool in {}'.format(list(self.nodes.keys())))
+ return
+ plt.legend(loc='upper right')
+ plt.savefig(outpath + '.svg', format='svg')
+ plt.savefig(outpath + '.png', format='png')
def anynickre(nick_re, nicks):
if not nick_re:
@@ -230,7 +293,7 @@ def anynickre(nick_re, nicks):
def gather_metrics_files_by_nick(metrics_files, metrics_dirs=None):
'''return {"node nickname":[path, path, ...], ...}'''
- metrics_fname_re = re.compile(r'(.*)\.(.*).metrics')
+ metrics_fname_re = re.compile(r'(.*?)\.([0-9_]+\.?\d+)\.metrics')
filesByNick = {}
nonick = []
tf_inventory_path = None
@@ -250,14 +313,16 @@ def gather_metrics_files_by_nick(metrics_files, metrics_dirs=None):
dapp(filesByNick, nick, path)
return tf_inventory_path, filesByNick, nonick
-def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args):
+def process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum):
nretup = (nre,)
for rnick, paths in filesByNick.items():
nick = nick_to_tfname.get(rnick, rnick)
if anynickre(nretup, (rnick,nick)):
- rsum(process_files(args, nick, paths), nick)
+ rsum(process_files(args, nick, paths, grsum), nick)
def main():
+ os.environ['TZ'] = 'UTC'
+ time.tzset()
test_metric_line_re()
ap = argparse.ArgumentParser()
ap.add_argument('metrics_files', nargs='*')
@@ -267,6 +332,7 @@ def main():
ap.add_argument('--report', default=None, help='path to write csv report')
ap.add_argument('--nick-re', action='append', default=[], help='regexp to filter node names, may be repeated')
ap.add_argument('--nick-lre', action='append', default=[], help='label:regexp to filter node names, may be repeated')
+    ap.add_argument('--pool-plot-root', help='plot output root path; writes <root>.svg, <root>.png, and per-node CSVs')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -281,6 +347,7 @@ def main():
metrics_dirs.add(args.dir)
metrics_files += glob.glob(os.path.join(args.dir, '*.metrics'))
tf_inventory_path, filesByNick, nonick = gather_metrics_files_by_nick(metrics_files, metrics_dirs)
+ logger.debug('%d files gathered into %d nicks', len(metrics_files), len(filesByNick))
if not tf_inventory_path:
for md in metrics_dirs:
tp = os.path.join(md, 'terraform-inventory.host')
@@ -300,6 +367,7 @@ def main():
ip_to_name[ip] = k
#logger.debug('names: %r', sorted(ip_to_name.values()))
#logger.debug('ip to name %r', ip_to_name)
+ unfound = []
for ip, name in ip_to_name.items():
found = []
for nick in filesByNick.keys():
@@ -310,14 +378,30 @@ def main():
elif len(found) > 1:
logger.warning('ip %s (%s) found in nicks: %r', ip, name, found)
else:
+ unfound.append((ip,name))
+ if not nick_to_tfname:
+ for ip,name in unfound:
logger.warning('ip %s (%s) no nick', ip, name)
#logger.debug('nick_to_tfname %r', nick_to_tfname)
+ logger.debug('nicks: %s', ' '.join(map(lambda x: nick_to_tfname.get(x,x), filesByNick.keys())))
+
+ # global stats across all nodes
+ grsum = summary()
+ if nonick:
+ grsum(process_files(args, None, nonick), 'no nick')
+ for rnick, paths in filesByNick.items():
+ nick = nick_to_tfname.get(rnick, rnick)
+ logger.debug('%s: %d files', nick, len(paths))
+ grsum(process_files(args, nick, paths), nick)
+ if args.pool_plot_root:
+ grsum.plot_pool(args.pool_plot_root)
+    # optionally compute stats for named groups of nodes
if args.nick_re:
# use each --nick-re=foo as a group
for nre in args.nick_re:
rsum = summary()
- process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
return 0
@@ -325,20 +409,13 @@ def main():
for lnre in args.nick_lre:
label, nre = lnre.split(':', maxsplit=1)
rsum = summary(label)
- process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args)
+ process_nick_re(nre, filesByNick, nick_to_tfname, rsum, args, grsum)
print(rsum)
print('\n')
return 0
-
- # no filters, glob it all up
- rsum = summary()
- if nonick:
- rsum(process_files(args, None, nonick), 'no nick')
- for rnick, paths in filesByNick.items():
- nick = nick_to_tfname.get(rnick, rnick)
- rsum(process_files(args, nick, paths), nick)
- print(rsum)
+ # no filters, print global result
+ print(grsum)
return 0
def perProtocol(prefix, lists, sums, deltas, dt):
@@ -349,9 +426,22 @@ def perProtocol(prefix, lists, sums, deltas, dt):
dapp(lists, sub, v/dt)
sums[sub] = sums.get(sub,0) + v
-def process_files(args, nick, paths):
+def process_files(args, nick, paths, grsum=None):
"returns a nodestats object"
- return nodestats().process_files(args, nick, paths)
+ return nodestats().process_files(args, nick, paths, grsum and grsum.biByTime)
+
+path_time_re = re.compile(r'(\d\d\d\d)(\d\d)(\d\d)_(\d\d)(\d\d)(\d\d\.+\d+)')
+
+def parse_path_time(path):
+ m = path_time_re.search(path)
+ if not m:
+ return None
+ ts = float(m.group(6))
+ si = math.floor(ts)
+ t = time.mktime((int(m.group(1)), int(m.group(2)), int(m.group(3)),
+ int(m.group(4)), int(m.group(5)), si, 0, 0, 0))
+ t += ts - si
+ return t
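+
+# e.g. a path containing '20220325_171503.25' parses to that UTC timestamp as
+# epoch seconds (main() sets TZ=UTC before any time.mktime call)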
class nodestats:
def __init__(self):
@@ -371,14 +461,24 @@ def __init__(self):
# algod_network_sent_bytes_*
self.txPLists = {}
self.txPSums = {}
+ self.times = []
# algod_tx_pool_count{}
self.txPool = []
-
- def process_files(self, args, nick=None, metrics_files=None):
+ # total across all measurements
+ self.tps = 0
+ self.blockTime = 0
+ self.biByTime = {}
+ # average milliseconds per agreement block verify
+ self.verifyMillis = None
+
+ def process_files(self, args, nick=None, metrics_files=None, bisource=None):
"returns self, a nodestats object"
+ if bisource is None:
+ bisource = {}
self.args = args
self.nick = nick
if metrics_files is None:
+ logger.debug('nodestats(%s) no metrics files', nick)
return self
reportf = None
writer = None
@@ -398,18 +498,30 @@ def process_files(self, args, nick=None, metrics_files=None):
prevtime = None
prevPath = None
prevbi = None
+ firstTime = None
+ firstBi = None
for path in sorted(metrics_files):
+ curtime = parse_path_time(path) or os.path.getmtime(path)
+ self.times.append(curtime)
with open(path, 'rt', encoding="utf-8") as fin:
cur = parse_metrics(fin)
+ # TODO: use _any_ node's blockinfo json
bijsonpath = path.replace('.metrics', '.blockinfo.json')
bi = None
if os.path.exists(bijsonpath):
with open(bijsonpath, 'rt', encoding="utf-8") as fin:
bi = json.load(fin)
- curtime = os.path.getmtime(path)
+ self.biByTime[curtime] = bi
+ if bi is None:
+ bi = bisource.get(curtime)
self.txPool.append(cur.get('algod_tx_pool_count{}'))
#logger.debug('%s: %r', path, cur)
+ verifyGood = cur.get('algod_agreement_proposal_verify_good{}')
+ verifyMs = cur.get('algod_agreement_proposal_verify_ms{}')
+ if verifyGood and verifyMs:
+ # last writer wins
+ self.verifyMillis = verifyMs / verifyGood
if prev is not None:
d = metrics_delta(prev, cur)
dt = curtime - prevtime
@@ -451,10 +563,20 @@ def process_files(self, args, nick=None, metrics_files=None):
tps,
blocktime,
))
+ else:
+ firstTime = curtime
+ firstBi = bi
prev = cur
prevPath = path
prevtime = curtime
prevbi = bi
+        if prevbi is None or firstBi is None:
+            return self
+        txnCount = prevbi.get('block',{}).get('tc',0) - firstBi.get('block',{}).get('tc',0)
+        rounds = prevbi.get('block',{}).get('rnd',0) - firstBi.get('block',{}).get('rnd',0)
+        totalDt = prevtime - firstTime
+        if rounds and totalDt:
+            # guard against a single sample, where no rounds or time elapsed
+            self.tps = txnCount / totalDt
+            self.blockTime = totalDt / rounds
if writer and self.txBpsList:
writer.writerow([])
for bsum, msg in sorted([(bsum,msg) for msg,bsum in self.txPSums.items()]):
diff --git a/test/heapwatch/nodeHostTarget.py b/test/heapwatch/nodeHostTarget.py
index 5332a1aea9..2a29f4f8f5 100644
--- a/test/heapwatch/nodeHostTarget.py
+++ b/test/heapwatch/nodeHostTarget.py
@@ -1,4 +1,21 @@
#!/usr/bin/env python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
#
# this is the script that runs on a node host started by runNodeHost.py
#
diff --git a/test/heapwatch/plot_crr_csv.py b/test/heapwatch/plot_crr_csv.py
new file mode 100755
index 0000000000..14f23b857b
--- /dev/null
+++ b/test/heapwatch/plot_crr_csv.py
@@ -0,0 +1,75 @@
+#!/usr/bin/env python3
+#
+# Plot the output of test/heapwatch/client_ram_report.py --csv
+
+import csv
+import random
+
+from matplotlib import pyplot as plt
+
+_meta_cols = {'when', 'dt', 'round'}
+
+def smin(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return min(a,b)
+def smax(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
+def main():
+ import argparse
+ ap = argparse.ArgumentParser()
+ ap.add_argument('files', nargs='+')
+ args = ap.parse_args()
+
+ for fname in args.files:
+ fvals = {}
+ minv = None
+ maxv = None
+ with open(fname) as fin:
+ reader = csv.DictReader(fin)
+ for rec in reader:
+ xround = int(rec['round'])
+ for k,v in rec.items():
+ if k in _meta_cols:
+ continue
+ klist = fvals.get(k)
+ if klist is None:
+ klist = []
+ fvals[k] = klist
+ v = float(v)
+ klist.append((xround, v))
+ minv = smin(minv, v)
+ maxv = smax(maxv, v)
+ print("{} found series {}".format(fname, sorted(fvals.keys())))
+ fig, ax = plt.subplots()
+ ax.set_ylabel('bytes')
+ ax.set_xlabel('round')
+ ax.set_ylim(minv,maxv)
+ for k in sorted(fvals.keys()):
+ xy = fvals[k]
+ #for k, xy in fvals.items():
+ lc = None
+ if k.startswith('r'):
+ # blueish
+ lc = (0.3*random.random(), 0.3*random.random(), 0.7+(0.3*random.random()))
+ elif k.startswith('npn'):
+ # greenish
+ lc = (0.3*random.random(), 0.7+(0.3*random.random()), 0.3*random.random())
+ elif k.startswith('n'):
+ # reddish
+ lc = (0.7+(0.3*random.random()), 0.3*random.random(), 0.3*random.random())
+ ax.plot([p[0] for p in xy], [p[1] for p in xy], label=k, color=lc)
+ ax.legend(loc='upper left', ncol=2)
+ plt.savefig(fname + '.svg', format='svg')
+ plt.savefig(fname + '.png', format='png')
+ #plt.show()
+
+if __name__ == '__main__':
+ main()
diff --git a/test/heapwatch/runNodeHost.py b/test/heapwatch/runNodeHost.py
index 10657907b7..1d14881fd1 100644
--- a/test/heapwatch/runNodeHost.py
+++ b/test/heapwatch/runNodeHost.py
@@ -1,4 +1,21 @@
#!/usr/bin/python3
+# Copyright (C) 2019-2022 Algorand, Inc.
+# This file is part of go-algorand
+#
+# go-algorand is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as
+# published by the Free Software Foundation, either version 3 of the
+# License, or (at your option) any later version.
+#
+# go-algorand is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with go-algorand. If not, see <https://www.gnu.org/licenses/>.
+#
+###
#
# launch an ec2 instance in the same AZ with the same AMI, run some algod on it
#
diff --git a/test/netperf-go/puppeteer/promMetricFetcher.go b/test/netperf-go/puppeteer/promMetricFetcher.go
index 64a3341a03..06e0853c84 100644
--- a/test/netperf-go/puppeteer/promMetricFetcher.go
+++ b/test/netperf-go/puppeteer/promMetricFetcher.go
@@ -19,7 +19,7 @@ package main
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"reflect"
"strconv"
@@ -62,7 +62,7 @@ func (r *promMetricFetcher) getMetric(query string) (results []promValueResult,
return nil, fmt.Errorf("http error code received %v", resp.StatusCode)
}
- bytes, err := ioutil.ReadAll(resp.Body)
+ bytes, err := io.ReadAll(resp.Body)
if err != nil {
return
}
diff --git a/test/netperf-go/puppeteer/puppeteer.go b/test/netperf-go/puppeteer/puppeteer.go
index bcc870672f..5a5fab11ce 100644
--- a/test/netperf-go/puppeteer/puppeteer.go
+++ b/test/netperf-go/puppeteer/puppeteer.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"path"
@@ -73,7 +72,7 @@ type puppet struct {
}
func puppeteer(channel, jsonFile string) error {
- jsonBytes, err := ioutil.ReadFile(jsonFile)
+ jsonBytes, err := os.ReadFile(jsonFile)
if err != nil {
return err
}
@@ -367,7 +366,7 @@ func (p *puppet) runStep(recipeStep recipeStep, timeout time.Duration) error {
outFile: os.Stdout,
}
} else {
- output = ioutil.Discard
+ output = io.Discard
}
cmd.Stderr = &errorOutput
diff --git a/test/scripts/e2e.sh b/test/scripts/e2e.sh
index ea829b2e8a..b7d04ff4aa 100755
--- a/test/scripts/e2e.sh
+++ b/test/scripts/e2e.sh
@@ -125,7 +125,7 @@ if [ -z "$E2E_TEST_FILTER" ] || [ "$E2E_TEST_FILTER" == "SCRIPTS" ]; then
# Pin a version of our python SDK's so that breaking changes don't spuriously break our tests.
# Please update as necessary.
- "${TEMPDIR}/ve/bin/pip3" install py-algorand-sdk==1.9.0b1
+ "${TEMPDIR}/ve/bin/pip3" install py-algorand-sdk==1.17.0
# Enable remote debugging:
"${TEMPDIR}/ve/bin/pip3" install --upgrade debugpy
diff --git a/test/scripts/e2e_basic_start_stop.sh b/test/scripts/e2e_basic_start_stop.sh
index 8d9da35c9a..1bb9b0bf13 100755
--- a/test/scripts/e2e_basic_start_stop.sh
+++ b/test/scripts/e2e_basic_start_stop.sh
@@ -28,6 +28,8 @@ function verify_at_least_one_running() {
}
function verify_none_running() {
+ local datadir=$1
+
# Shutting down can take some time, so wait at least 5 seconds
for TRIES in 1 2 3 4 5; do
update_running_count
@@ -37,6 +39,15 @@ function verify_none_running() {
sleep 1.4
done
echo "algod not expected to be running but it is"
+ if [ -n "$datadir" ]; then
+ echo "last 20 lines of node.log:"
+ tail -20 "$datadir/node.log"
+ echo "================================"
+ echo "stdout and stdin:"
+ cat "$datadir/algod-out.log"
+ echo "================================"
+ cat "$datadir/algod-err.log"
+ fi
exit 1
}
@@ -64,7 +75,7 @@ verify_at_least_one_running
echo Verifying we can stop it using goal
goal node stop -d ${DATADIR}
-verify_none_running
+verify_none_running ${DATADIR}
#----------------------
# Test that we can start a generic node straight with no overrides
@@ -72,7 +83,7 @@ echo Verifying a generic node will start directly
algod -d ${DATADIR} &
verify_at_least_one_running
pkill -u $(whoami) -x algod || true
-verify_none_running
+verify_none_running ${DATADIR}
#----------------------
# Test that we can start a generic node against the datadir
@@ -85,7 +96,7 @@ verify_at_least_one_running # one should still be running
verify_one_running # in fact, exactly one should still be running
# clean up
pkill -u $(whoami) -x algod || true
-verify_none_running
+verify_none_running ${DATADIR}
echo "----------------------------------------------------------------------"
echo " DONE: e2e_basic_start_stop"
diff --git a/test/scripts/e2e_client_runner.py b/test/scripts/e2e_client_runner.py
index 6fa5b4ffba..53a0e484ef 100755
--- a/test/scripts/e2e_client_runner.py
+++ b/test/scripts/e2e_client_runner.py
@@ -246,10 +246,10 @@ def get_pub_wallet(self):
return self.pubw, self.maxpubaddr
def start(self, scriptname, timeout):
- self.event_log("run", scriptname)
t = threading.Thread(target=script_thread, args=(self, scriptname, timeout))
t.start()
with self.lock:
+ self.event_log("run", scriptname)
self.threads[scriptname] = t
def running(self, scriptname, p):
@@ -257,8 +257,8 @@ def running(self, scriptname, p):
self.procs[scriptname] = p
def done(self, scriptname, ok, seconds):
- self.event_log("pass" if ok else "fail", scriptname, seconds)
with self.lock:
+ self.event_log("pass" if ok else "fail", scriptname, seconds)
self.statuses.append( {'script':scriptname, 'ok':ok, 'seconds':seconds} )
if not ok:
self.errors.append('{} failed'.format(scriptname))
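The two hunks above move event_log inside the `with self.lock:` block so that writing the event is atomic with the state change it describes; previously a "run" event could be emitted before the thread was registered in self.threads, and a "pass"/"fail" event before statuses was updated. A minimal sketch of the corrected pattern (Runner and its fields are hypothetical stand-ins):

```python
import threading

class Runner:
    def __init__(self):
        self.lock = threading.Lock()
        self.threads = {}
        self.events = []  # stand-in for the runner's shared event log

    def event_log(self, event, name):
        # only called with self.lock held, so entries stay ordered
        # consistently with the bookkeeping below
        self.events.append((event, name))

    def start(self, name, target):
        t = threading.Thread(target=target)
        t.start()
        with self.lock:
            # emit and register atomically: anyone else taking the lock
            # sees both the "run" event and the thread, or neither
            self.event_log("run", name)
            self.threads[name] = t
```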
diff --git a/test/scripts/e2e_subs/goal-app-info.sh b/test/scripts/e2e_subs/goal-app-info.sh
new file mode 100755
index 0000000000..ef8cee7acd
--- /dev/null
+++ b/test/scripts/e2e_subs/goal-app-info.sh
@@ -0,0 +1,95 @@
+#!/bin/bash
+
+scriptname="goal-app-info-test"
+date "+${scriptname} start %Y%m%d_%H%M%S"
+
+set -e
+set -x
+set -o pipefail
+export SHELLOPTS
+
+WALLET=$1
+
+# Directory of this bash program
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+gcmd="goal -w ${WALLET}"
+
+ACCOUNT=$(${gcmd} account list|awk '{ print $3 }')
+
+EXTRA_PAGES=1
+GLOBAL_BYTESLICES=2
+GLOBAL_INTS=3
+LOCAL_BYTESLICES=4
+LOCAL_INTS=5
+
+APPID=$(${gcmd} app create --creator ${ACCOUNT} --approval-prog ${DIR}/tealprogs/upgraded.teal --clear-prog ${DIR}/tealprogs/clear_program_state.teal --extra-pages ${EXTRA_PAGES} --global-byteslices ${GLOBAL_BYTESLICES} --global-ints ${GLOBAL_INTS} --local-byteslices ${LOCAL_BYTESLICES} --local-ints ${LOCAL_INTS} | grep Created | awk '{ print $6 }')
+
+APP_INFO=$(${gcmd} app info --app-id $APPID)
+
+ACTUAL_APPID=($(echo "$APP_INFO" | grep "Application ID:"))
+ACTUAL_APP_ACCOUNT=($(echo "$APP_INFO" | grep "Application account:"))
+ACTUAL_CREATOR=($(echo "$APP_INFO" | grep "Creator:"))
+ACTUAL_APPROVAL_HASH=($(echo "$APP_INFO" | grep "Approval hash:"))
+ACTUAL_CLEAR_HASH=($(echo "$APP_INFO" | grep "Clear hash:"))
+ACTUAL_EXTRA_PAGES=($(echo "$APP_INFO" | grep "Extra program pages:"))
+ACTUAL_GLOBAL_BYTESLICES=($(echo "$APP_INFO" | grep "Max global byteslices:"))
+ACTUAL_GLOBAL_INTS=($(echo "$APP_INFO" | grep "Max global integers:"))
+ACTUAL_LOCAL_BYTESLICES=($(echo "$APP_INFO" | grep "Max local byteslices:"))
+ACTUAL_LOCAL_INTS=($(echo "$APP_INFO" | grep "Max local integers:"))
+
+if [[ ${APPID} -ne ${ACTUAL_APPID[2]} ]]; then
+ date "+${scriptname} FAIL returned app ID does not match ${APPID} != ${ACTUAL_APPID[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+# Use the Python SDK to get the expected app escrow address
+EXPECTED_APP_ACCOUNT=$(python3 -c "from algosdk.logic import get_application_address;print(get_application_address($APPID))")
+if [[ $EXPECTED_APP_ACCOUNT != ${ACTUAL_APP_ACCOUNT[2]} ]]; then
+ date "+${scriptname} FAIL returned app account does not match ${EXPECTED_APP_ACCOUNT} != ${ACTUAL_APP_ACCOUNT[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${ACCOUNT} != ${ACTUAL_CREATOR[1]} ]]; then
+ date "+${scriptname} FAIL returned app creator does not match ${ACCOUNT} != ${ACTUAL_CREATOR[1]} %Y%m%d_%H%M%S"
+ false
+fi
+
+EXPECTED_APPROVAL_HASH="RBHEXJWG2M4T4OBDMNOQFKYYDPDMXQXZIMFZCINJAYVI5KPZLXVUWZRR2Q"
+if [[ ${EXPECTED_APPROVAL_HASH} != ${ACTUAL_APPROVAL_HASH[2]} ]]; then
+ date "+${scriptname} FAIL returned app approval hash does not match ${EXPECTED_APPROVAL_HASH} != ${ACTUAL_APPROVAL_HASH[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+EXPECTED_CLEAR_HASH="YOE6C22GHCTKAN3HU4SE5PGIPN5UKXAJTXCQUPJ3KKF5HOAH646MKKCPDA"
+if [[ ${EXPECTED_CLEAR_HASH} != ${ACTUAL_CLEAR_HASH[2]} ]]; then
+ date "+${scriptname} FAIL returned app clear hash does not match ${EXPECTED_CLEAR_HASH} != ${ACTUAL_CLEAR_HASH[2]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${EXTRA_PAGES} -ne ${ACTUAL_EXTRA_PAGES[3]} ]]; then
+ date "+${scriptname} FAIL returned app extra pages does not match ${EXTRA_PAGES} != ${ACTUAL_EXTRA_PAGES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${GLOBAL_BYTESLICES} -ne ${ACTUAL_GLOBAL_BYTESLICES[3]} ]]; then
+ date "+${scriptname} FAIL returned app global byte slice schema does not match ${GLOBAL_BYTESLICES} != ${ACTUAL_GLOBAL_BYTESLICES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${GLOBAL_INTS} -ne ${ACTUAL_GLOBAL_INTS[3]} ]]; then
+ date "+${scriptname} FAIL returned app global int schema does not match ${GLOBAL_INTS} != ${ACTUAL_GLOBAL_INTS[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${LOCAL_BYTESLICES} -ne ${ACTUAL_LOCAL_BYTESLICES[3]} ]]; then
+ date "+${scriptname} FAIL returned app local byte slice schema does not match ${LOCAL_BYTESLICES} != ${ACTUAL_LOCAL_BYTESLICES[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+if [[ ${LOCAL_INTS} -ne ${ACTUAL_LOCAL_INTS[3]} ]]; then
+ date "+${scriptname} FAIL returned app local int schema does not match ${LOCAL_INTS} != ${ACTUAL_LOCAL_INTS[3]} %Y%m%d_%H%M%S"
+ false
+fi
+
+date "+${scriptname} OK %Y%m%d_%H%M%S"
diff --git a/test/scripts/tps.py b/test/scripts/tps.py
index 834cdbb7a4..103f83a7e6 100644
--- a/test/scripts/tps.py
+++ b/test/scripts/tps.py
@@ -37,14 +37,13 @@ def algod_client_for_dir(algorand_data, headers=None):
def get_blockinfo_tps(algod, rounds=10):
status = algod.status()
- rounds = 10
ba = msgpack.loads(algod.block_info(status['last-round']-rounds, response_format='msgpack'), strict_map_key=False)
bb = msgpack.loads(algod.block_info(status['last-round'], response_format='msgpack'), strict_map_key=False)
ra = ba['block']['rnd']
rb = bb['block']['rnd']
assert(rb - ra == rounds)
- tca = ba['block']['tc']
- tcb = bb['block']['tc']
+ tca = ba['block'].get('tc',0)
+ tcb = bb['block'].get('tc',0)
tsa = ba['block']['ts']
tsb = bb['block']['ts']
dt = tsb-tsa
@@ -54,11 +53,57 @@ def get_blockinfo_tps(algod, rounds=10):
logger.debug('(b[%d].TxnCounter %d) - (b[%d].TxnCounter %d) = %d txns', ra, tca, rb, tcb, dtxn)
return tps
+def mins(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return min(a,b)
+
+def maxs(a,b):
+ if a is None:
+ return b
+ if b is None:
+ return a
+ return max(a,b)
+
+def get_blockinfo_tps_with_types(algod, rounds=10, adir=''):
+ status = algod.status()
+ lastround = status['last-round']
+ cround = lastround - rounds
+ bytxtype = {}
+ mintime = None
+ maxtime = None
+ mintc = None
+ maxtc = 0
+ while cround <= lastround:
+ ba = msgpack.loads(algod.block_info(cround, response_format='msgpack'), strict_map_key=False)
+ #logger.debug('block keys %s', sorted(ba['block'].keys()))
+ mintime = mins(mintime, ba['block']['ts'])
+ maxtime = maxs(maxtime, ba['block']['ts'])
+ mintc = mins(mintc, ba['block'].get('tc'))
+ maxtc = maxs(maxtc, ba['block'].get('tc',0))
+ txns = ba['block'].get('txns',[])
+ for stxib in txns:
+ #logger.debug('txn keys %s', sorted(stxib['txn'].keys()))
+ tt = stxib['txn']['type']
+ bytxtype[tt] = bytxtype.get(tt, 0) + 1
+ cround += 1
+ summary = [(count, tt) for tt,count in bytxtype.items()]
+ summary.sort(reverse=True)
+ print(summary)
+ dt = maxtime-mintime
+ dtxn = maxtc-mintc
+ logger.debug('%s ts=[%d..%d] (%ds), tc=[%d..%d] (%d txn)', adir, mintime, maxtime, dt, mintc, maxtc, dtxn)
+ tps = dtxn/dt
+ return tps
+
def main():
ap = argparse.ArgumentParser()
ap.add_argument('data_dirs', nargs='*', help='list paths to algorand datadirs to grab heap profile from')
ap.add_argument('-d', dest='algorand_data')
- ap.add_argument('-r', '--rounds', type=int, help='number of rounds to calculate over')
+ ap.add_argument('-T', '--types', default=False, action='store_true', help='show txn types counts within round range')
+ ap.add_argument('-r', '--rounds', type=int, default=10, help='number of rounds to calculate over')
ap.add_argument('--verbose', default=False, action='store_true')
args = ap.parse_args()
@@ -70,11 +115,21 @@ def main():
datadirs = args.data_dirs
if args.algorand_data:
datadirs = datadirs + [args.algorand_data]
+ if not datadirs:
+ ad = os.getenv('ALGORAND_DATA')
+ if ad:
+ datadirs.append(ad)
+ if not datadirs:
+ sys.stderr.write('no data dirs specified (positional file, -d AD, $ALGORAND_DATA)\n')
+ sys.exit(1)
for adir in datadirs:
algod = algod_client_for_dir(adir)
- tps = get_blockinfo_tps(algod, rounds=args.rounds)
- print('{:5.1f}\t{}'.format(tps, adir))
+ if args.types:
+ tps = get_blockinfo_tps_with_types(algod, rounds=args.rounds, adir=adir)
+ else:
+ tps = get_blockinfo_tps(algod, rounds=args.rounds)
+ print('{:5.1f} TPS\t{}'.format(tps, adir))
return 0
if __name__ == '__main__':
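Both TPS paths reduce to the same ratio: the change in the block header's transaction counter (tc) across the window, divided by the change in block timestamps (ts); the new .get('tc', 0) fallback just keeps blocks that omit the counter from raising a KeyError. A worked example with made-up numbers:

```python
# hypothetical counters and unix timestamps 10 rounds apart
tca, tcb = 1_000_000, 1_050_000          # block txn counters at window edges
tsa, tsb = 1_660_000_000, 1_660_000_040  # block timestamps (seconds)

tps = (tcb - tca) / (tsb - tsa)          # 50_000 txns over 40 s
assert tps == 1250.0
```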
diff --git a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
index e2cc497901..0bba73353b 100644
--- a/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
+++ b/test/testdata/deployednettemplates/hosttemplates/hosttemplates.json
@@ -37,6 +37,12 @@
"Region": "ap-southeast-2",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS_AP_SOUTHEAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS_AP_SOUTHEAST-2-c5d.9xl",
"Provider": "AWS",
@@ -80,6 +86,12 @@
"Region": "eu-north-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-EU-NORTH-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-north-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-EU-NORTH-1-c5d.9xl",
"Provider": "AWS",
@@ -122,6 +134,12 @@
"Region": "us-west-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-US-WEST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-west-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-US-WEST-1-c5d.9xl",
"Provider": "AWS",
@@ -158,6 +176,12 @@
"Region": "us-west-2",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-US-WEST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "us-west-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-US-WEST-2-c5d.9xl",
"Provider": "AWS",
@@ -338,6 +362,12 @@
"Region": "ap-south-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-AP-SOUTH-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-south-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-AP-SOUTH-1-c5d.9xl",
"Provider": "AWS",
@@ -374,6 +404,12 @@
"Region": "ap-southeast-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-AP-SOUTHEAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-AP-SOUTHEAST-1-c5d.9xl",
"Provider": "AWS",
@@ -410,6 +446,12 @@
"Region": "ap-southeast-2",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-AP-SOUTHEAST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ap-southeast-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-AP-SOUTHEAST-2-c5d.9xl",
"Provider": "AWS",
@@ -536,6 +578,12 @@
"Region": "eu-central-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-EU-CENTRAL-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-central-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-EU-CENTRAL-1-c5d.9xl",
"Provider": "AWS",
@@ -608,6 +656,12 @@
"Region": "eu-west-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-EU-WEST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-EU-WEST-1-c5d.9xl",
"Provider": "AWS",
@@ -644,6 +698,12 @@
"Region": "eu-west-2",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-EU-WEST-2-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-2",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-EU-WEST-2-c5d.9xl",
"Provider": "AWS",
@@ -680,6 +740,12 @@
"Region": "eu-west-3",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-EU-WEST-3-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "eu-west-3",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-EU-WEST-3-c5d.9xl",
"Provider": "AWS",
@@ -717,6 +783,12 @@
"Region": "sa-east-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-SA-EAST-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "sa-east-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-SA-EAST-1-c5d.9xl",
"Provider": "AWS",
@@ -741,6 +813,12 @@
"Region": "ca-central-1",
"BaseConfiguration": "m5d.4xlarge"
},
+ {
+ "Name": "AWS-CA-CENTRAL-1-c5d.4xl",
+ "Provider": "AWS",
+ "Region": "ca-central-1",
+ "BaseConfiguration": "c5d.4xlarge"
+ },
{
"Name": "AWS-CA-CENTRAL-1-c5d.9xl",
"Provider": "AWS",
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
new file mode 100644
index 0000000000..2a7d450398
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/Makefile
@@ -0,0 +1,15 @@
+PARAMS=-w 20 -R 1 -N 20 -n 20 -H 1 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+
+all: topology.json net.json genesis.json
+
+topology.json: gen_topology.py
+ python gen_topology.py
+
+net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS}
+
+genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile
+ netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS}
+
+clean:
+ rm -f net.json genesis.json topology.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py
new file mode 100644
index 0000000000..c952456476
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/gen_topology.py
@@ -0,0 +1,31 @@
+import json
+import os
+
+node_types = {"R":1, "N":20, "NPN":1}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.4xl", "NPN":"-c5d.4xl"}
+regions = [
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-1",
+ "AWS-SA-EAST-1",
+ "AWS-EU-NORTH-1",
+ "AWS-AP-SOUTHEAST-1"
+]
+
+network = "alphanet"
+
+host_elements = []
+region_count = len(regions)
+for node_type in node_types.keys():
+ node_count = node_types[node_type]
+ region_size = node_size[node_type]
+ for i in range(node_count):
+ host = {}
+ node_name = node_type + str(i + 1) + "-" + network
+ region = regions[i % region_count]
+ host["Name"] = node_name
+ host["Template"] = region + region_size
+ host_elements.append(host)
+
+ec2_hosts = {"Hosts": host_elements}
+with open("topology.json", "w") as f:
+ f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep)
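gen_topology.py deals hosts of each type to regions round-robin via regions[i % region_count], so the 20 participating nodes land 4 per region and both R1 and N1 (i = 0) land in AWS-US-EAST-1, which is exactly what the committed topology.json below shows. A quick consistency check:

```python
regions = ["AWS-US-EAST-1", "AWS-US-WEST-1", "AWS-SA-EAST-1",
           "AWS-EU-NORTH-1", "AWS-AP-SOUTHEAST-1"]

counts = {}
for i in range(20):  # N1..N20
    r = regions[i % len(regions)]
    counts[r] = counts.get(r, 0) + 1
assert all(c == 4 for c in counts.values())
assert regions[0] == "AWS-US-EAST-1"  # placement of R1 and N1
```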
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json
new file mode 100644
index 0000000000..0be2b32674
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/genesis.json
@@ -0,0 +1,154 @@
+{
+ "NetworkName": "",
+ "VersionModifier": "",
+ "ConsensusProtocol": "alpha4",
+ "FirstPartKeyRound": 0,
+ "LastPartKeyRound": 50000,
+ "PartKeyDilution": 0,
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet2",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet3",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet4",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet5",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet6",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet7",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet8",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet9",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet10",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 6.25,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 6.25,
+ "Online": false
+ }
+ ],
+ "FeeSink": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM",
+ "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ",
+ "DevMode": false,
+ "Comment": ""
+}
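Stakes in these genesis templates are percentages that must total 100: the 20 online wallets at 2.5 carry half and the 8 offline wallets at 6.25 the other half (the updated alphanet genesis further down makes the same 50/50 split with 10 offline wallets at 5 each). Quick arithmetic check:

```python
online = [2.5] * 20    # participating wallets
offline = [6.25] * 8   # non-participating wallets
assert sum(online) + sum(offline) == 100.0  # 50.0 + 50.0
```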
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json
new file mode 100644
index 0000000000..0e2afae8b6
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/net.json
@@ -0,0 +1,504 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay1",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node1",
+ "Wallets": [
+ {
+ "Name": "Wallet1",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node2",
+ "Wallets": [
+ {
+ "Name": "Wallet2",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node3",
+ "Wallets": [
+ {
+ "Name": "Wallet3",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node4",
+ "Wallets": [
+ {
+ "Name": "Wallet4",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node5",
+ "Wallets": [
+ {
+ "Name": "Wallet5",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N6-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node6",
+ "Wallets": [
+ {
+ "Name": "Wallet6",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N7-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node7",
+ "Wallets": [
+ {
+ "Name": "Wallet7",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N8-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node8",
+ "Wallets": [
+ {
+ "Name": "Wallet8",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N9-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node9",
+ "Wallets": [
+ {
+ "Name": "Wallet9",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N10-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node10",
+ "Wallets": [
+ {
+ "Name": "Wallet10",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json
new file mode 100644
index 0000000000..d3b429ee32
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/node.json
@@ -0,0 +1,10 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json
new file mode 100644
index 0000000000..5b0a52d9d9
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/nonPartNode.json
@@ -0,0 +1,5 @@
+{
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json
new file mode 100644
index 0000000000..a2f88f63b4
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/recipe.json
@@ -0,0 +1,7 @@
+{
+ "GenesisFile":"genesis.json",
+ "NetworkFile":"net.json",
+ "ConfigFile": "../../configs/reference.json",
+ "HostTemplatesFile": "../../hosttemplates/hosttemplates.json",
+ "TopologyFile": "topology.json"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json
new file mode 100644
index 0000000000..db8fb939d8
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/relay.json
@@ -0,0 +1,11 @@
+{
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableBlockStats": true,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json b/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json
new file mode 100644
index 0000000000..cbf980c94d
--- /dev/null
+++ b/test/testdata/deployednettemplates/recipes/alphanet-extension/topology.json
@@ -0,0 +1,88 @@
+{
+ "Hosts": [
+ {
+ "Name": "R1-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N1-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N2-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N11-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N12-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N13-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N14-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N15-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N16-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N17-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N18-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N19-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N20-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ }
+ ]
+}
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
index 13130934de..4cb3c207d0 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/Makefile
+++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile
@@ -1,4 +1,4 @@
-PARAMS=-w 8 -R 1 -N 4 -n 8 -H 2 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
+PARAMS=-w 20 -R 5 -N 20 -n 20 -H 10 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json
all: topology.json net.json genesis.json
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
index 7298256d8a..ae4344210f 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
+++ b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py
@@ -1,15 +1,14 @@
import json
import os
-node_types = {"R":1, "N":4, "NPN":2}
-node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"}
+node_types = {"R":5, "N":20, "NPN":10}
+node_size = {"R":"-c5d.4xl", "N":"-c5d.4xl", "NPN":"-c5d.4xl"}
regions = [
- "AWS-US-EAST-2",
- "AWS-US-WEST-2",
- "AWS-EU-CENTRAL-1",
- "AWS-EU-WEST-2",
- "AWS-AP-SOUTHEAST-1",
- "AWS-AP-SOUTHEAST-2"
+ "AWS-US-EAST-1",
+ "AWS-US-WEST-1",
+ "AWS-SA-EAST-1",
+ "AWS-EU-NORTH-1",
+ "AWS-AP-SOUTHEAST-1"
]
network = "alphanet"
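With the counts raised to 5 relays, 20 nodes, and 10 NPNs across these 5 regions, the round-robin spreads every class evenly: 1 relay, 4 participating nodes, and 2 non-participating nodes per region. Check:

```python
node_types = {"R": 5, "N": 20, "NPN": 10}
num_regions = 5
per_region = {t: n // num_regions for t, n in node_types.items()}
assert per_region == {"R": 1, "N": 4, "NPN": 2}
```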
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
index 1d78dd7821..d0c1b7e41c 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json
@@ -1,59 +1,159 @@
{
- "NetworkName": "alphanet",
+ "NetworkName": "",
"VersionModifier": "",
- "ConsensusProtocol": "alpha1",
+ "ConsensusProtocol": "alpha4",
"FirstPartKeyRound": 0,
- "LastPartKeyRound": 3000000,
+ "LastPartKeyRound": 50000,
"PartKeyDilution": 0,
"Wallets": [
{
"Name": "Wallet1",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet2",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet3",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet4",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet5",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet6",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet7",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet8",
- "Stake": 6.25,
+ "Stake": 2.5,
"Online": true
},
{
"Name": "Wallet9",
- "Stake": 25,
- "Online": false
+ "Stake": 2.5,
+ "Online": true
},
{
"Name": "Wallet10",
- "Stake": 25,
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet11",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet12",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet13",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet14",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet15",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet16",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet17",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet18",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet19",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet20",
+ "Stake": 2.5,
+ "Online": true
+ },
+ {
+ "Name": "Wallet21",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet22",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet23",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet24",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet25",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet26",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet27",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet28",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet29",
+ "Stake": 5,
+ "Online": false
+ },
+ {
+ "Name": "Wallet30",
+ "Stake": 5,
"Online": false
}
],
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/net.json b/test/testdata/deployednettemplates/recipes/alphanet/net.json
index e75a91d293..0fed024323 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/net.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/net.json
@@ -20,6 +20,86 @@
}
]
},
+ {
+ "Name": "R2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay2",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay3",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay4",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "R5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "relay5",
+ "Wallets": null,
+ "NetAddress": "{{NetworkPort}}",
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}"
+ }
+ ]
+ },
{
"Name": "N1-alphanet",
"Group": "",
@@ -41,15 +121,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N2-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node5",
+ "Name": "node2",
"Wallets": [
{
"Name": "Wallet2",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -62,11 +149,11 @@
]
},
{
- "Name": "N2-alphanet",
+ "Name": "N3-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node2",
+ "Name": "node3",
"Wallets": [
{
"Name": "Wallet3",
@@ -82,15 +169,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N4-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node6",
+ "Name": "node4",
"Wallets": [
{
"Name": "Wallet4",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -103,11 +197,11 @@
]
},
{
- "Name": "N3-alphanet",
+ "Name": "N5-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node3",
+ "Name": "node5",
"Wallets": [
{
"Name": "Wallet5",
@@ -123,15 +217,22 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N6-alphanet",
+ "Group": "",
+ "Nodes": [
{
- "Name": "node7",
+ "Name": "node6",
"Wallets": [
{
"Name": "Wallet6",
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -144,11 +245,11 @@
]
},
{
- "Name": "N4-alphanet",
+ "Name": "N7-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "node4",
+ "Name": "node7",
"Wallets": [
{
"Name": "Wallet7",
@@ -164,7 +265,13 @@
"EnableService": false,
"EnableBlockStats": true,
"ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
- },
+ }
+ ]
+ },
+ {
+ "Name": "N8-alphanet",
+ "Group": "",
+ "Nodes": [
{
"Name": "node8",
"Wallets": [
@@ -173,6 +280,7 @@
"ParticipationOnly": false
}
],
+ "APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
"EnableTelemetry": true,
"TelemetryURI": "{{TelemetryURI}}",
@@ -185,11 +293,11 @@
]
},
{
- "Name": "NPN1-alphanet",
+ "Name": "N9-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "nonParticipatingNode1",
+ "Name": "node9",
"Wallets": [
{
"Name": "Wallet9",
@@ -198,20 +306,22 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
- "EnableTelemetry": false,
- "EnableMetrics": false,
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
"EnableService": false,
- "EnableBlockStats": false,
- "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
}
]
},
{
- "Name": "NPN2-alphanet",
+ "Name": "N10-alphanet",
"Group": "",
"Nodes": [
{
- "Name": "nonParticipatingNode2",
+ "Name": "node10",
"Wallets": [
{
"Name": "Wallet10",
@@ -220,6 +330,468 @@
],
"APIEndpoint": "{{APIEndpoint}}",
"APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N11-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node11",
+ "Wallets": [
+ {
+ "Name": "Wallet11",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N12-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node12",
+ "Wallets": [
+ {
+ "Name": "Wallet12",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N13-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node13",
+ "Wallets": [
+ {
+ "Name": "Wallet13",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N14-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node14",
+ "Wallets": [
+ {
+ "Name": "Wallet14",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N15-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node15",
+ "Wallets": [
+ {
+ "Name": "Wallet15",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N16-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node16",
+ "Wallets": [
+ {
+ "Name": "Wallet16",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N17-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node17",
+ "Wallets": [
+ {
+ "Name": "Wallet17",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N18-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node18",
+ "Wallets": [
+ {
+ "Name": "Wallet18",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N19-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node19",
+ "Wallets": [
+ {
+ "Name": "Wallet19",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "N20-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "node20",
+ "Wallets": [
+ {
+ "Name": "Wallet20",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": true,
+ "TelemetryURI": "{{TelemetryURI}}",
+ "EnableMetrics": true,
+ "MetricsURI": "{{MetricsURI}}",
+ "EnableService": false,
+ "EnableBlockStats": true,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}"
+ }
+ ]
+ },
+ {
+ "Name": "NPN1-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode1",
+ "Wallets": [
+ {
+ "Name": "Wallet21",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN2-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode2",
+ "Wallets": [
+ {
+ "Name": "Wallet22",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN3-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode3",
+ "Wallets": [
+ {
+ "Name": "Wallet23",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN4-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode4",
+ "Wallets": [
+ {
+ "Name": "Wallet24",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN5-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode5",
+ "Wallets": [
+ {
+ "Name": "Wallet25",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN6-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode6",
+ "Wallets": [
+ {
+ "Name": "Wallet26",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN7-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode7",
+ "Wallets": [
+ {
+ "Name": "Wallet27",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN8-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode8",
+ "Wallets": [
+ {
+ "Name": "Wallet28",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN9-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode9",
+ "Wallets": [
+ {
+ "Name": "Wallet29",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
+ "EnableTelemetry": false,
+ "EnableMetrics": false,
+ "EnableService": false,
+ "EnableBlockStats": false,
+ "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }"
+ }
+ ]
+ },
+ {
+ "Name": "NPN10-alphanet",
+ "Group": "",
+ "Nodes": [
+ {
+ "Name": "nonParticipatingNode10",
+ "Wallets": [
+ {
+ "Name": "Wallet30",
+ "ParticipationOnly": false
+ }
+ ],
+ "APIEndpoint": "{{APIEndpoint}}",
+ "APIToken": "{{APIToken}}",
"EnableTelemetry": false,
"EnableMetrics": false,
"EnableService": false,
diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
index 8760eae203..35cb3a0984 100644
--- a/test/testdata/deployednettemplates/recipes/alphanet/topology.json
+++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json
@@ -2,31 +2,143 @@
"Hosts": [
{
"Name": "R1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R2-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "R3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "R4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "R5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
},
{
"Name": "N1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
},
{
"Name": "N2-alphanet",
- "Template": "AWS-US-WEST-2-m5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.4xl"
},
{
"Name": "N3-alphanet",
- "Template": "AWS-EU-CENTRAL-1-m5d.4xl"
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
},
{
"Name": "N4-alphanet",
- "Template": "AWS-EU-WEST-2-m5d.4xl"
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N11-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N12-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N13-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N14-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N15-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N16-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N17-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "N18-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "N19-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "N20-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
},
{
"Name": "NPN1-alphanet",
- "Template": "AWS-US-EAST-2-m5d.4xl"
+ "Template": "AWS-US-EAST-1-c5d.4xl"
},
{
"Name": "NPN2-alphanet",
- "Template": "AWS-US-WEST-2-m5d.4xl"
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN3-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN4-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN5-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN6-alphanet",
+ "Template": "AWS-US-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN7-alphanet",
+ "Template": "AWS-US-WEST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN8-alphanet",
+ "Template": "AWS-SA-EAST-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN9-alphanet",
+ "Template": "AWS-EU-NORTH-1-c5d.4xl"
+ },
+ {
+ "Name": "NPN10-alphanet",
+ "Template": "AWS-AP-SOUTHEAST-1-c5d.4xl"
}
]
}
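The host names added to topology.json must line up with the node-config entries above; a name that appears in one file but not the other is an easy copy-paste mistake in a recipe this repetitive. A small cross-check sketch, assuming both files key their entries under a top-level Hosts array and using hypothetical local file names:

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Only the fields visible in this diff are modeled.
type hostsFile struct {
	Hosts []struct {
		Name     string `json:"Name"`
		Template string `json:"Template,omitempty"`
	} `json:"Hosts"`
}

func load(path string) hostsFile {
	var f hostsFile
	data, err := os.ReadFile(path)
	if err != nil {
		panic(err)
	}
	if err := json.Unmarshal(data, &f); err != nil {
		panic(err)
	}
	return f
}

func main() {
	topo := load("topology.json")
	net := load("net.json") // assumed filename for the node-config template

	known := make(map[string]bool)
	for _, h := range net.Hosts {
		known[h.Name] = true
	}
	for _, h := range topo.Hosts {
		if !known[h.Name] {
			fmt.Printf("topology host %q missing from net.json\n", h.Name)
		}
	}
}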
diff --git a/tools/debug/algodump/main.go b/tools/debug/algodump/main.go
index e3ebba9223..f61944ff1a 100644
--- a/tools/debug/algodump/main.go
+++ b/tools/debug/algodump/main.go
@@ -99,7 +99,7 @@ func (dh *dumpHandler) Handle(msg network.IncomingMessage) network.OutgoingMessa
data = fmt.Sprintf("proposal %s", shortdigest(crypto.Digest(p.Block.Hash())))
case protocol.TxnTag:
- dec := protocol.NewDecoderBytes(msg.Data)
+ dec := protocol.NewMsgpDecoderBytes(msg.Data)
for {
var stx transactions.SignedTxn
err := dec.Decode(&stx)
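The algodump change above switches to a decoder that is drained in a loop, since a single TxnTag message may carry several back-to-back encoded transactions. The same drain-until-EOF shape, shown here with the standard library's streaming JSON decoder as a self-contained analogy (the payload is made up):

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"strings"
)

func main() {
	// Three concatenated objects in one buffer, analogous to several
	// SignedTxns in one TxnTag message.
	r := strings.NewReader(`{"id":1}{"id":2}{"id":3}`)
	dec := json.NewDecoder(r)
	for {
		var m map[string]int
		if err := dec.Decode(&m); err == io.EOF {
			break // buffer fully drained
		} else if err != nil {
			fmt.Println("decode error:", err)
			break
		}
		fmt.Println("decoded:", m)
	}
}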
diff --git a/tools/debug/doberman/logo.go b/tools/debug/doberman/logo.go
index 163b73951c..a274110742 100644
--- a/tools/debug/doberman/logo.go
+++ b/tools/debug/doberman/logo.go
@@ -16,7 +16,7 @@
package main
-// data, err := ioutil.ReadFile("algorand-logo.png")
+// data, err := os.ReadFile("algorand-logo.png")
// fmt.Printf("%#v\n", data)
var logo = []byte{0x89, 0x50, 0x4e, 0x47, 0xd, 0xa, 0x1a, 0xa, 0x0, 0x0, 0x0, 0xd, 0x49, 0x48, 0x44, 0x52, 0x0, 0x0, 0x0, 0xf0, 0x0, 0x0, 0x0, 0xf0, 0x8, 0x2, 0x0, 0x0, 0x0, 0xb1, 0x37, 0x7e, 0xc5, 0x0, 0x0, 0xf, 0xa1, 0x49, 0x44, 0x41, 0x54, 0x78, 0xda, 0xec, 0x9d, 0x7b, 0x6c, 0x14, 0xd5, 0x17, 0xc7, 0x67, 0xbb, 0xdb, 0xd7, 0x6e, 0x2d, 0x65, 0xfb, 0xb2, 0x80, 0xf, 0x5a, 0xa0, 0x22, 0x18, 0x68, 0x8, 0xa2, 0x88, 0x1a, 0x63, 0x85, 0x50, 0xd2, 0x8a, 0x50, 0x81, 0xaa, 0x4, 0x62, 0xf0, 0x5, 0x46, 0x57, 0x14, 0x9, 0xad, 0x9, 0x1a, 0x6c, 0xa2, 0xc5, 0x12, 0xf8, 0x3, 0xf1, 0x81, 0x5, 0x6b, 0x45, 0x9a, 0xa, 0x85, 0x95, 0x50, 0x8b, 0x3c, 0x82, 0xba, 0x46, 0xd0, 0x3e, 0x10, 0x69, 0x30, 0xb5, 0x58, 0x42, 0x8b, 0x85, 0x6e, 0xb, 0xdd, 0xdd, 0xb6, 0xfb, 0x9c, 0xfd, 0xe5, 0x97, 0x4d, 0xaa, 0x11, 0x28, 0xdd, 0x73, 0x66, 0xf6, 0x31, 0xfd, 0x7e, 0xfe, 0x2, 0xc2, 0x9d, 0x33, 0x7b, 0xe7, 0xb3, 0x67, 0xcf, 0xdc, 0xb9, 0x73, 0xaf, 0xc6, 0xeb, 0xf5, 0xa, 0x0, 0x28, 0x85, 0x8, 0x74, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x40, 0xf9, 0x68, 0xd0, 0x5, 0x41, 0xa7, 0xa9, 0xa9, 0x69, 0xf7, 0xee, 0xdd, 0x11, 0x11, 0xfe, 0x25, 0x17, 0x9d, 0x4e, 0xb7, 0x66, 0xcd, 0x1a, 0xf4, 0xde, 0x7f, 0x50, 0x79, 0xbd, 0x5e, 0xf4, 0x42, 0x70, 0x99, 0x35, 0x6b, 0x96, 0xc9, 0x64, 0x22, 0x34, 0xb4, 0xdb, 0xed, 0xd1, 0xd1, 0xd1, 0xe8, 0x40, 0x94, 0x1c, 0x21, 0xc4, 0xf1, 0xe3, 0xc7, 0x69, 0x36, 0xb, 0x82, 0x70, 0xfa, 0xf4, 0x69, 0x74, 0x20, 0x84, 0xe, 0x21, 0xdc, 0x6e, 0x77, 0x5e, 0x5e, 0x1e, 0xb9, 0xf9, 0xd7, 0x5f, 0x7f, 0x8d, 0x3e, 0x84, 0xd0, 0x21, 0xc4, 0x87, 0x1f, 0x7e, 0x68, 0xb1, 0x58, 0xc8, 0xcd, 0xcb, 0xca, 0xca, 0xd0, 0x87, 0xa8, 0xa1, 0x43, 0x28, 0x3d, 0x47, 0x46, 0x46, 0x32, 0xf, 0x62, 0xb5, 0x5a, 0xe3, 0xe2, 0xe2, 0xd0, 0x99, 0xc8, 0xd0, 0xc1, 0x67, 0xf3, 0xe6, 0xcd, 0xfc, 0x83, 0xb4, 0xb5, 0xb5, 0xa1, 0x27, 0x21, 0x74, 0xf0, 0x69, 0x68, 0x68, 0x90, 0x64, 0xd0, 0xed, 0xb3, 0xcf, 0x3e, 0x43, 0x67, 0xa2, 0xe4, 0x8, 0x3e, 0x23, 0x47, 0x8e, 0xbc, 0x7a, 0xf5, 0x2a, 0xff, 0x38, 0x71, 0x71, 0x71, 0x16, 0x8b, 0x45, 0xa5, 0x52, 0xa1, 0x4b, 0x91, 0xa1, 0x83, 0xc6, 0xfb, 0xef, 0xbf, 0x2f, 0x89, 0xcd, 0x82, 0x20, 0xd8, 0x6c, 0xb6, 0xfa, 0xfa, 0x7a, 0x74, 0x29, 0x32, 0x74, 0xd0, 0xe8, 0xeb, 0xeb, 0x4b, 0x48, 0x48, 0x70, 0xb9, 0x5c, 0x52, 0x1d, 0x30, 0x37, 0x37, 0xd7, 0x68, 0x34, 0xa2, 0x63, 0x91, 0xa1, 0x83, 0xc3, 0xb, 0x2f, 0xbc, 0x20, 0xa1, 0xcd, 0x82, 0x20, 0xd4, 0xd4, 0xd4, 0xa0, 0x57, 0x91, 0xa1, 0x83, 0x43, 0x6f, 0x6f, 0xaf, 0x1c, 0xa3, 0x6c, 0xb8, 0x88, 0xc8, 0xd0, 0xc1, 0xe1, 0xe5, 0x97, 0x5f, 0x96, 0xe3, 0xb0, 0x3f, 0xfc, 0xf0, 0x3, 0xfa, 0x16, 0x19, 0x3a, 0xd0, 0xb4, 0xb6, 0xb6, 0x8e, 0x1d, 0x3b, 0x56, 0x8e, 0x23, 0xcf, 0x9e, 0x3d, 0xbb, 0xb6, 0xb6, 0x16, 0x3d, 0xc, 0xa1, 0x3, 0x87, 0xcb, 0xe5, 0xba, 0xeb, 
0xae, 0xbb, 0xce, 0x9d, 0x3b, 0x27, 0xc7, 0xc1, 0xe3, 0xe3, 0xe3, 0x7b, 0x7a, 0x7a, 0xd0, 0xc9, 0x28, 0x39, 0x2, 0xc7, 0xd2, 0xa5, 0x4b, 0x65, 0xb2, 0x59, 0x10, 0x4, 0x8b, 0xc5, 0x2, 0xa1, 0x21, 0x74, 0xe0, 0xe8, 0xea, 0xea, 0xaa, 0xac, 0xac, 0x94, 0x35, 0x44, 0x45, 0x45, 0x5, 0xfa, 0x19, 0x42, 0x7, 0x88, 0xe5, 0xcb, 0x97, 0xcb, 0x1d, 0xe2, 0xf0, 0xe1, 0xc3, 0xe8, 0x67, 0xd4, 0xd0, 0x81, 0xe0, 0xe2, 0xc5, 0x8b, 0xa3, 0x47, 0x8f, 0x96, 0x3b, 0x4a, 0x52, 0x52, 0x52, 0x67, 0x67, 0x27, 0x7a, 0x1b, 0x19, 0x5a, 0x76, 0x66, 0xcf, 0x9e, 0x1d, 0x80, 0x28, 0x66, 0xb3, 0xb9, 0xa3, 0xa3, 0x3, 0xbd, 0xd, 0xa1, 0xe5, 0xa5, 0xba, 0xba, 0xfa, 0xcc, 0x99, 0x33, 0x81, 0x89, 0xb5, 0x6f, 0xdf, 0x3e, 0x74, 0x38, 0x4a, 0xe, 0x19, 0x71, 0xbb, 0xdd, 0x49, 0x49, 0x49, 0x1, 0x1b, 0x7f, 0xc8, 0xce, 0xce, 0xfe, 0xee, 0xbb, 0xef, 0x90, 0xa1, 0x81, 0x5c, 0x94, 0x96, 0x96, 0x6, 0x72, 0x34, 0xed, 0xd8, 0xb1, 0x63, 0x1e, 0x8f, 0x7, 0x19, 0x1a, 0x19, 0x5a, 0xae, 0xf4, 0x1c, 0x15, 0x15, 0x15, 0xe0, 0xee, 0x6d, 0x6f, 0x6f, 0x1f, 0x35, 0x6a, 0x14, 0x32, 0x34, 0x90, 0x9e, 0x4d, 0x9b, 0x36, 0x91, 0x6d, 0x26, 0x4f, 0xd8, 0x3f, 0x7a, 0xf4, 0x28, 0x4a, 0xe, 0x20, 0x3d, 0x3f, 0xff, 0xfc, 0xf3, 0xda, 0xb5, 0x6b, 0xc9, 0xcd, 0xd, 0x6, 0x43, 0x52, 0x52, 0x12, 0xa1, 0xe1, 0xf6, 0xed, 0xdb, 0x51, 0x72, 0xa0, 0xe4, 0x90, 0x9e, 0x98, 0x98, 0x18, 0x87, 0xc3, 0x41, 0x6b, 0xab, 0xd5, 0x6a, 0x7b, 0x7b, 0x7b, 0x73, 0x73, 0x73, 0xf, 0x1c, 0x38, 0x40, 0x68, 0xee, 0x74, 0x3a, 0xf9, 0x2f, 0x93, 0x23, 0x43, 0x83, 0x7f, 0x78, 0xeb, 0xad, 0xb7, 0xc8, 0x36, 0xfb, 0x6a, 0x15, 0xce, 0xc3, 0xc5, 0x8b, 0x17, 0x2f, 0x22, 0x43, 0x3, 0xc9, 0x70, 0xb9, 0x5c, 0x5a, 0xad, 0xd6, 0xed, 0x76, 0xd3, 0x9a, 0xa7, 0xa4, 0xa4, 0x5c, 0xba, 0x74, 0xc9, 0xeb, 0xf5, 0x8a, 0xa2, 0x18, 0x15, 0x15, 0x25, 0x8a, 0xa2, 0xbf, 0x47, 0x28, 0x2d, 0x2d, 0x5d, 0xbd, 0x7a, 0xf5, 0xb0, 0xed, 0x7f, 0xac, 0x3e, 0x2a, 0x31, 0x45, 0x45, 0x45, 0x64, 0x9b, 0x5, 0x41, 0x28, 0x29, 0x29, 0xf1, 0xdd, 0x14, 0xaa, 0xd5, 0x6a, 0x9d, 0x4e, 0x67, 0xb5, 0x5a, 0xfd, 0x3d, 0xc2, 0xd6, 0xad, 0x5b, 0xf5, 0x7a, 0xbd, 0xbf, 0xad, 0xbc, 0x5e, 0x6f, 0x6a, 0x6a, 0x6a, 0x4e, 0x4e, 0xe, 0x32, 0x34, 0xf8, 0x7, 0x9b, 0xcd, 0x16, 0x1f, 0x1f, 0x4f, 0xee, 0x52, 0xbd, 0x5e, 0xdf, 0xd5, 0xd5, 0x35, 0xf0, 0xd7, 0xec, 0xec, 0xec, 0x23, 0x47, 0x8e, 0x4, 0xec, 0xe4, 0xcd, 0x66, 0xb3, 0x5e, 0xaf, 0xf, 0xf7, 0x15, 0x11, 0x50, 0x43, 0x4b, 0xc9, 0xd2, 0xa5, 0x4b, 0x39, 0x9, 0xa2, 0xae, 0xae, 0x6e, 0xa0, 0xb9, 0xd7, 0xeb, 0xd, 0xe4, 0xf2, 0xcf, 0x35, 0x35, 0x35, 0x89, 0x89, 0x89, 0x4a, 0x58, 0xdf, 0xc3, 0xb, 0x24, 0xa2, 0xb1, 0xb1, 0x91, 0x73, 0x21, 0xbe, 0xf8, 0xe2, 0x8b, 0x6b, 0x8f, 0x19, 0x98, 0xe5, 0x9f, 0xc7, 0x8f, 0x1f, 0x2f, 0x8a, 0xa2, 0x32, 0xae, 0x2, 0x32, 0xb4, 0x34, 0x88, 0xa2, 0xf8, 0xf8, 0xe3, 0x8f, 0x93, 0x9b, 0xa7, 0xa5, 0xa5, 0x3d, 0xfd, 0xf4, 0xd3, 0xd7, 0xfe, 0xfb, 0x13, 0x4f, 0x3c, 0x11, 0x80, 0x93, 0xdf, 0xbf, 0x7f, 0xbf, 0x62, 0xd6, 0x5e, 0x82, 0xd0, 0xd2, 0x70, 0xe0, 0xc0, 0x81, 0xf3, 0xe7, 0xcf, 0x93, 0x9b, 0x1b, 0x8d, 0xc6, 0xeb, 0x2a, 0x55, 0x58, 0x58, 0x28, 0xf7, 0x99, 0xe7, 0xe7, 0xe7, 0x4f, 0x9c, 0x38, 0x51, 0x31, 0x17, 0x2, 0x37, 0x85, 0xd2, 0xa4, 0x67, 0xbd, 0x5e, 0x4f, 0x9e, 0x87, 0x34, 0x66, 0xcc, 0x98, 0xb, 0x17, 0x2e, 0xdc, 0xa8, 0x20, 0xf4, 0x77, 0xef, 0x15, 0x7f, 0x69, 0x6a, 0x6a, 0x52, 0x92, 0xd0, 0xc8, 0xd0, 0x12, 0x30, 0x77, 0xee, 0x5c, 0xce, 0xac, 0xba, 0x41, 0x16, 0xe2, 0x57, 0xa9, 0x54, 0xb2, 0x96, 0xd1, 0xf3, 0xe6, 0xcd, 0x53, 0x92, 0xcd, 0xb8, 0x29, 0x94, 0x80, 0xd6, 0xd6, 0x56, 0x4e, 0xff, 0x3f, 0xf2, 0xc8, 0x23, 0x83, 0x1f, 0x7f, 0xe5, 0xca, 0x95, 0x32, 0x5d, 0xfa, 0xc8, 0xc8, 0x48, 
0xb7, 0xdb, 0xad, 0x98, 0xdb, 0x41, 0xdc, 0x14, 0x4a, 0xc3, 0x83, 0xf, 0x3e, 0x48, 0x6e, 0xab, 0x56, 0xab, 0x8d, 0x46, 0xe3, 0xe0, 0x55, 0xdf, 0x4b, 0x2f, 0xbd, 0x24, 0xd3, 0x99, 0xaf, 0x5f, 0xbf, 0x5e, 0xad, 0x56, 0x2b, 0x6c, 0x29, 0x5e, 0x3c, 0x29, 0x64, 0xb1, 0x73, 0xe7, 0xce, 0x1b, 0x95, 0xbf, 0x43, 0xe1, 0xd5, 0x57, 0x5f, 0xbd, 0xe9, 0x52, 0x77, 0xe3, 0xc6, 0x8d, 0x93, 0xe3, 0xcc, 0xb5, 0x5a, 0x6d, 0x51, 0x51, 0x91, 0xf2, 0xae, 0x8, 0x6e, 0xa, 0x59, 0xdc, 0x72, 0xcb, 0x2d, 0x36, 0x9b, 0x8d, 0x98, 0x4b, 0x34, 0x1a, 0x97, 0xcb, 0xe5, 0xf5, 0x7a, 0x6f, 0x9a, 0x23, 0x27, 0x4c, 0x98, 0xd0, 0xdc, 0xdc, 0x2c, 0xed, 0x99, 0xd7, 0xd5, 0xd5, 0x65, 0x65, 0x65, 0x29, 0x6f, 0xa5, 0x74, 0x94, 0x1c, 0x74, 0x8a, 0x8a, 0x8a, 0xc8, 0x36, 0xfb, 0x86, 0xea, 0x86, 0x38, 0x97, 0xff, 0xb9, 0xe7, 0x9e, 0x93, 0xf6, 0xcc, 0x8b, 0x8b, 0x8b, 0x15, 0x69, 0x33, 0x32, 0x34, 0x1d, 0x97, 0xcb, 0xa5, 0xd3, 0xe9, 0xc8, 0x2b, 0x3d, 0x4f, 0x9f, 0x3e, 0xfd, 0xe4, 0xc9, 0x93, 0x43, 0xfc, 0xcf, 0x27, 0x4e, 0x9c, 0xb8, 0xef, 0xbe, 0xfb, 0x24, 0x3c, 0x79, 0x9b, 0xcd, 0xa6, 0xd3, 0xe9, 0x14, 0x79, 0x5d, 0x90, 0xa1, 0xe9, 0x77, 0x54, 0x9c, 0x75, 0xcb, 0xfd, 0x5a, 0x2c, 0x74, 0xca, 0x94, 0x29, 0x12, 0x9e, 0xf9, 0x7, 0x1f, 0x7c, 0xa0, 0x54, 0x9b, 0x5, 0xc, 0xdb, 0xd1, 0xb8, 0x74, 0xe9, 0x12, 0xa7, 0xcf, 0xb, 0xb, 0xb, 0xfd, 0x8d, 0x38, 0x75, 0xea, 0x54, 0x49, 0x2e, 0x77, 0x62, 0x62, 0xa2, 0xc7, 0xe3, 0x51, 0xd8, 0x50, 0x1d, 0x86, 0xed, 0xb8, 0x29, 0x20, 0x37, 0x37, 0x97, 0xdc, 0x5c, 0xa7, 0xd3, 0xbd, 0xf3, 0xce, 0x3b, 0xfe, 0x56, 0x7a, 0xb, 0x17, 0x2e, 0x94, 0xe4, 0xe4, 0xcb, 0xcb, 0xcb, 0x23, 0x22, 0x22, 0x14, 0xbc, 0x6b, 0x16, 0x84, 0xf6, 0x9b, 0x37, 0xdf, 0x7c, 0x73, 0xe8, 0xe5, 0xef, 0xb5, 0x6c, 0xdf, 0xbe, 0x5d, 0xa3, 0xd1, 0xf8, 0xa5, 0x94, 0xd7, 0xeb, 0xcd, 0xcf, 0xcf, 0xe7, 0x9f, 0xf9, 0x94, 0x29, 0x53, 0x72, 0x72, 0x72, 0x94, 0x7d, 0xd7, 0x84, 0x9b, 0x42, 0xff, 0xbb, 0x8c, 0x91, 0xde, 0xc6, 0x8e, 0x1d, 0x4b, 0x5e, 0x25, 0x5a, 0xab, 0xd5, 0xf6, 0xf7, 0xf7, 0x73, 0xce, 0xbc, 0xad, 0xad, 0x2d, 0x0, 0xcb, 0x46, 0x22, 0x43, 0x87, 0x13, 0xcc, 0x4d, 0x52, 0x36, 0x6c, 0xd8, 0x40, 0x6e, 0x9b, 0x91, 0x91, 0xc1, 0x9, 0x9d, 0x99, 0x99, 0xa9, 0x78, 0x9b, 0x91, 0xa1, 0xfd, 0xa3, 0xa7, 0xa7, 0x47, 0xaf, 0xd7, 0x13, 0x5e, 0x5c, 0xe5, 0xa7, 0x67, 0xdf, 0x48, 0xdf, 0xaf, 0xbf, 0xfe, 0x4a, 0x6e, 0x7e, 0xe5, 0xca, 0x95, 0x11, 0x23, 0x46, 0x28, 0x7e, 0xcf, 0x59, 0x64, 0x68, 0x3f, 0xc8, 0xcd, 0xcd, 0x25, 0xdb, 0xac, 0xd1, 0x68, 0x8e, 0x1d, 0x3b, 0x46, 0x4e, 0x1f, 0x7, 0xf, 0x1e, 0xe4, 0xd8, 0xbc, 0x7c, 0xf9, 0xf2, 0x84, 0x84, 0x84, 0xe1, 0xb0, 0x83, 0x32, 0x32, 0xf4, 0x50, 0xf9, 0xf1, 0xc7, 0x1f, 0x39, 0xf3, 0x90, 0xf6, 0xec, 0xd9, 0xb3, 0x60, 0xc1, 0x2, 0x5a, 0x5b, 0x51, 0x14, 0x93, 0x93, 0x93, 0xbb, 0xbb, 0xbb, 0x69, 0xcd, 0x93, 0x93, 0x93, 0x3b, 0x3a, 0x3a, 0xe4, 0x9e, 0x57, 0xd, 0xa1, 0xc3, 0x8c, 0x31, 0x63, 0xc6, 0xb4, 0xb7, 0xb7, 0xd3, 0xda, 0xe, 0xac, 0xb6, 0x41, 0xcb, 0x91, 0x5b, 0xb6, 0x6c, 0x31, 0x18, 0xc, 0x9c, 0x93, 0xb7, 0xdb, 0xed, 0x81, 0x79, 0x3d, 0x11, 0x25, 0x47, 0x78, 0x50, 0x59, 0x59, 0x49, 0xb6, 0x59, 0x10, 0x4, 0x93, 0xc9, 0x44, 0x1e, 0x1e, 0xe9, 0xeb, 0xeb, 0x63, 0xda, 0xec, 0xab, 0x58, 0x86, 0xc9, 0x95, 0x82, 0xd0, 0x43, 0xfa, 0xc5, 0xe7, 0x4c, 0x4a, 0xbe, 0xfd, 0xf6, 0xdb, 0x39, 0x3, 0x14, 0x65, 0x65, 0x65, 0xfc, 0x8f, 0xf0, 0xd1, 0x47, 0x1f, 0xd, 0x93, 0x8b, 0x85, 0x92, 0xe3, 0xe6, 0x94, 0x97, 0x97, 0x2f, 0x5b, 0xb6, 0x8c, 0xdc, 0x9c, 0xf3, 0xd2, 0x5e, 0x7f, 0x7f, 0xbf, 0x56, 0xab, 0x95, 0x20, 0x6f, 0x45, 0x44, 0xc, 0x93, 0xb5, 0xd0, 0x91, 0xa1, 0x6f, 0x82, 0xcb, 0xe5, 0xe2, 0xd8, 0xbc, 0x70, 0xe1, 0x42, 0xce, 0x4b, 
0x7b, 0x9c, 0x67, 0xec, 0xff, 0xf9, 0x91, 0x61, 0x3e, 0x94, 0x81, 0xd0, 0xa, 0xa1, 0xa0, 0xa0, 0x80, 0xdc, 0x56, 0xa3, 0xd1, 0xec, 0xd8, 0xb1, 0x83, 0xfc, 0x1b, 0xb8, 0x61, 0xc3, 0x6, 0x9, 0x97, 0x2, 0xdb, 0xb1, 0x63, 0xc7, 0xb0, 0xb8, 0x60, 0x98, 0x3a, 0x37, 0x8, 0x9f, 0x7c, 0xf2, 0x9, 0xa7, 0x6f, 0x8b, 0x8a, 0x8a, 0xc8, 0xa1, 0x45, 0x51, 0x94, 0x76, 0x99, 0xe7, 0x49, 0x93, 0x26, 0xd, 0x87, 0x4b, 0x86, 0x1a, 0x7a, 0x30, 0x62, 0x63, 0x63, 0xed, 0x76, 0x3b, 0xad, 0x6d, 0x5c, 0x5c, 0x1c, 0x61, 0xed, 0xd0, 0x1, 0xf2, 0xf3, 0xf3, 0xf7, 0xec, 0xd9, 0x23, 0xe1, 0x67, 0x89, 0x89, 0x89, 0x19, 0xe, 0x55, 0x7, 0x4a, 0x8e, 0x1b, 0xb2, 0x75, 0xeb, 0x56, 0xb2, 0xcd, 0xbe, 0x17, 0x60, 0xc9, 0x6d, 0xfb, 0xfb, 0xfb, 0x25, 0xdf, 0x74, 0xd0, 0x6e, 0xb7, 0xff, 0x7b, 0x69, 0x53, 0x94, 0x1c, 0xc3, 0xb, 0x87, 0xc3, 0x11, 0x13, 0x13, 0x43, 0xee, 0xd5, 0x59, 0xb3, 0x66, 0x71, 0xa2, 0x2f, 0x5e, 0xbc, 0x58, 0x8e, 0x6b, 0xbd, 0x71, 0xe3, 0x46, 0xc5, 0x5f, 0x38, 0x8, 0x7d, 0x7d, 0x56, 0xac, 0x58, 0xc1, 0x51, 0xc7, 0xe9, 0x74, 0x92, 0xdf, 0xa, 0x91, 0x6f, 0xe7, 0xd9, 0xe9, 0xd3, 0xa7, 0x2b, 0xfe, 0xc2, 0x61, 0x5d, 0x8e, 0xeb, 0xd0, 0xda, 0xda, 0xca, 0xd9, 0x4e, 0xaa, 0xa4, 0xa4, 0x84, 0x7c, 0x3f, 0xe7, 0xf5, 0x7a, 0xf3, 0xf2, 0xf2, 0x64, 0xfa, 0x5c, 0xa7, 0x4f, 0x9f, 0x56, 0xfc, 0xb5, 0xc3, 0x4d, 0xe1, 0x75, 0x98, 0x36, 0x6d, 0x5a, 0x7d, 0x7d, 0x3d, 0xad, 0x6d, 0x54, 0x54, 0x54, 0x6f, 0x6f, 0xaf, 0x46, 0x43, 0xcc, 0x14, 0x87, 0xe, 0x1d, 0x9a, 0x33, 0x67, 0x8e, 0x7c, 0x1f, 0xad, 0xb1, 0xb1, 0x51, 0xda, 0x57, 0x6e, 0x71, 0x53, 0x18, 0xea, 0x5c, 0xbe, 0x7c, 0x99, 0x6c, 0xb3, 0x20, 0x8, 0xef, 0xbd, 0xf7, 0x1e, 0xd9, 0x66, 0x41, 0x10, 0xae, 0xbb, 0x4a, 0xb4, 0x84, 0x7c, 0xff, 0xfd, 0xf7, 0x18, 0xe5, 0x18, 0x5e, 0x3c, 0xf4, 0xd0, 0x43, 0xe4, 0xb6, 0xf1, 0xf1, 0xf1, 0x6, 0x83, 0x81, 0xfc, 0xa3, 0x57, 0x53, 0x53, 0x63, 0x36, 0x9b, 0x65, 0xfd, 0x74, 0x83, 0xac, 0x74, 0x8a, 0x92, 0x43, 0x81, 0x30, 0x27, 0x3d, 0x1b, 0x8d, 0x46, 0xf2, 0xc3, 0xea, 0xf6, 0xf6, 0xf6, 0xdb, 0x6e, 0xbb, 0x4d, 0xee, 0xcb, 0xa1, 0x52, 0xa9, 0x9c, 0x4e, 0x27, 0xe7, 0x37, 0x4, 0x42, 0x87, 0xd, 0xdd, 0xdd, 0xdd, 0x69, 0x69, 0x69, 0x4e, 0xa7, 0x93, 0xd6, 0x3c, 0x2b, 0x2b, 0x8b, 0x53, 0xab, 0xa4, 0xa7, 0xa7, 0xff, 0xf5, 0xd7, 0x5f, 0x1, 0xf8, 0x98, 0xbf, 0xfd, 0xf6, 0xdb, 0x3d, 0xf7, 0xdc, 0x83, 0x92, 0x63, 0x58, 0x14, 0x1b, 0x64, 0x9b, 0x55, 0x2a, 0xd5, 0x4d, 0x17, 0xc6, 0x1d, 0x84, 0xdd, 0xbb, 0x77, 0x7, 0xc6, 0x66, 0x41, 0x10, 0xbe, 0xf9, 0xe6, 0x1b, 0x25, 0x5f, 0x45, 0xc, 0x39, 0xfb, 0x38, 0x7c, 0xf8, 0x30, 0xa7, 0x1b, 0xe7, 0xcf, 0x9f, 0xcf, 0x89, 0x3e, 0x62, 0xc4, 0x88, 0x80, 0x5d, 0xf1, 0x3b, 0xee, 0xb8, 0x3, 0x73, 0x39, 0x94, 0xf, 0x67, 0xa8, 0x4e, 0xad, 0x56, 0x77, 0x75, 0x75, 0x91, 0xa5, 0xac, 0xaa, 0xaa, 0x5a, 0xb4, 0x68, 0x51, 0x20, 0x3f, 0xac, 0xc3, 0xe1, 0x88, 0x8a, 0x8a, 0x42, 0xc9, 0xa1, 0x58, 0xe, 0x1d, 0x3a, 0xc4, 0x29, 0x7f, 0x6b, 0x6b, 0x6b, 0xc9, 0x36, 0x7b, 0x3c, 0x1e, 0xce, 0x7c, 0x6b, 0x1a, 0x7f, 0xff, 0xfd, 0x37, 0x6a, 0x68, 0xc5, 0x22, 0x8a, 0x22, 0x67, 0xf4, 0x37, 0x3d, 0x3d, 0xfd, 0xd1, 0x47, 0x1f, 0x25, 0x37, 0x7f, 0xf7, 0xdd, 0x77, 0xc9, 0x93, 0xe0, 0xc8, 0xab, 0xd4, 0x55, 0x54, 0x54, 0xa0, 0x86, 0x56, 0x2c, 0x9b, 0x36, 0x6d, 0xe2, 0x74, 0x60, 0x5b, 0x5b, 0x1b, 0x39, 0x34, 0x73, 0xd4, 0x79, 0xdd, 0xba, 0x75, 0x69, 0x69, 0x69, 0x84, 0x86, 0x7a, 0xbd, 0x1e, 0x93, 0x93, 0x94, 0x89, 0xc5, 0x62, 0xe1, 0x28, 0xb5, 0x62, 0xc5, 0xa, 0x4e, 0x74, 0xce, 0x98, 0x77, 0x4c, 0x4c, 0x8c, 0xc3, 0xe1, 0x20, 0x4f, 0xa2, 0x3a, 0x7b, 0xf6, 0x2c, 0x84, 0x56, 0x20, 0xf3, 0xe7, 0xcf, 0xe7, 0x8, 0x6d, 0xb5, 0x5a, 0xc9, 0xb3, 0xea, 0xfe, 0xf8, 0xe3, 0xf, 
0x4e, 0xe8, 0xb2, 0xb2, 0x32, 0xaf, 0xd7, 0xdb, 0xd2, 0xd2, 0x42, 0x6b, 0xbe, 0x6c, 0xd9, 0x32, 0x8, 0xad, 0x34, 0xdc, 0x6e, 0x37, 0x47, 0x29, 0xa6, 0x13, 0x9c, 0x49, 0x42, 0xb1, 0xb1, 0xb1, 0x3, 0xc7, 0xa1, 0xcd, 0xdb, 0x4e, 0x4d, 0x4d, 0x55, 0xe4, 0x35, 0x1d, 0xd6, 0x37, 0x85, 0x9c, 0x79, 0x6d, 0x91, 0x91, 0x91, 0x9f, 0x7e, 0xfa, 0x29, 0x79, 0xd0, 0x73, 0xed, 0xda, 0xb5, 0xa7, 0x4e, 0x9d, 0x22, 0x47, 0xdf, 0xb2, 0x65, 0xcb, 0xc0, 0x9f, 0x9f, 0x7c, 0xf2, 0x49, 0xc2, 0x11, 0x98, 0x9b, 0x10, 0xe0, 0xa6, 0x30, 0xe4, 0xd8, 0xb8, 0x71, 0x23, 0xa7, 0xdf, 0x4a, 0x4a, 0x4a, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x67, 0xa5, 0x39, 0xdf, 0x94, 0xf, 0x1f, 0xa2, 0x28, 0x92, 0x4b, 0x97, 0xba, 0xba, 0x3a, 0x94, 0x1c, 0xa, 0xc1, 0xe1, 0x70, 0x70, 0x26, 0xe8, 0xa4, 0xa4, 0xa4, 0x70, 0xa2, 0x73, 0x86, 0xf9, 0x4, 0x41, 0x68, 0x6e, 0x6e, 0xfe, 0x4f, 0xe1, 0x4e, 0x3b, 0xce, 0x82, 0x5, 0xb, 0x20, 0x34, 0xee, 0x5, 0xff, 0x4f, 0x7d, 0x7d, 0x3d, 0xf9, 0x5e, 0xf0, 0xc4, 0x89, 0x13, 0x9c, 0xd0, 0x8f, 0x3d, 0xf6, 0xd8, 0xb5, 0xc7, 0xbc, 0xf3, 0xce, 0x3b, 0x31, 0x78, 0x37, 0x7c, 0x85, 0x66, 0xe, 0x2f, 0x94, 0x96, 0x96, 0x72, 0xa2, 0xd3, 0xe4, 0xf3, 0x91, 0x90, 0x90, 0xe0, 0x70, 0x38, 0xae, 0xfd, 0x2e, 0xd1, 0x96, 0xae, 0x53, 0xa9, 0x54, 0x76, 0xbb, 0x1d, 0x42, 0x87, 0x3d, 0x4b, 0x96, 0x2c, 0xe1, 0x8, 0xed, 0x76, 0xbb, 0xc9, 0xa1, 0x1b, 0x1b, 0x1b, 0x39, 0xa1, 0xab, 0xab, 0xab, 0xa5, 0xfd, 0x8a, 0xfa, 0xd6, 0x60, 0x87, 0xd0, 0x61, 0xcc, 0xd9, 0xb3, 0x67, 0x39, 0x4a, 0x7d, 0xfc, 0xf1, 0xc7, 0xe4, 0xd0, 0x1e, 0x8f, 0x87, 0xf6, 0x60, 0x6f, 0x60, 0xa0, 0xed, 0x46, 0x75, 0xe, 0x79, 0xfd, 0x90, 0x82, 0x82, 0x2, 0x8, 0x1d, 0xc6, 0x88, 0xa2, 0xc8, 0x59, 0x3a, 0x31, 0x21, 0x21, 0xc1, 0xe3, 0xf1, 0x90, 0xa3, 0x57, 0x56, 0x56, 0x72, 0xbe, 0x4b, 0xa7, 0x4e, 0x9d, 0x1a, 0xe4, 0xe0, 0x93, 0x27, 0x4f, 0x26, 0x1c, 0x73, 0xd4, 0xa8, 0x51, 0x10, 0x3a, 0x8c, 0xa9, 0xa9, 0xa9, 0xe1, 0x28, 0x65, 0x32, 0x99, 0xc8, 0xa1, 0xc9, 0x8f, 0xf4, 0x7c, 0xe4, 0xe6, 0xe6, 0xe, 0x7e, 0xfc, 0x6d, 0xdb, 0xb6, 0xd1, 0x8e, 0xdc, 0xd5, 0xd5, 0x5, 0xa1, 0xc3, 0x35, 0x3d, 0x73, 0x76, 0xcd, 0x79, 0xf8, 0xe1, 0x87, 0x7d, 0x7, 0xa1, 0x45, 0x4f, 0x49, 0x49, 0x21, 0x87, 0x56, 0xab, 0xd5, 0xdd, 0xdd, 0xdd, 0x83, 0x84, 0x16, 0x45, 0xf1, 0xcf, 0x3f, 0xff, 0xa4, 0x1d, 0xbc, 0xbc, 0xbc, 0x5c, 0x49, 0x57, 0x79, 0x18, 0x3d, 0x29, 0x7c, 0xfd, 0xf5, 0xd7, 0x39, 0x6f, 0x33, 0x7c, 0xf5, 0xd5, 0x57, 0xe4, 0x6d, 0x25, 0x2a, 0x2a, 0x2a, 0x2e, 0x5f, 0xbe, 0x4c, 0xe, 0x6d, 0x30, 0x18, 0x46, 0x8e, 0x1c, 0x39, 0x48, 0x68, 0x95, 0x4a, 0x95, 0x9e, 0x9e, 0x4e, 0xdb, 0x45, 0x65, 0xef, 0xde, 0xbd, 0x78, 0x52, 0x18, 0x7e, 0x98, 0xcd, 0x66, 0xce, 0xc3, 0xb9, 0x19, 0x33, 0x66, 0x70, 0xa2, 0xa7, 0xa6, 0xa6, 0x72, 0xd2, 0xb3, 0xd3, 0xe9, 0x1c, 0x4a, 0x94, 0x7b, 0xef, 0xbd, 0x97, 0x70, 0xfc, 0xe4, 0xe4, 0x64, 0x64, 0xe8, 0xf0, 0x63, 0xda, 0xb4, 0x69, 0xe4, 0x2d, 0x6, 0x55, 0x2a, 0x55, 0x6d, 0x6d, 0x2d, 0x39, 0xbb, 0x97, 0x95, 0x95, 0x71, 0x26, 0x4e, 0x18, 0xc, 0x86, 0x21, 0x2e, 0x2c, 0x46, 0xdb, 0xf, 0xbc, 0xb3, 0xb3, 0x93, 0xf3, 0xeb, 0x81, 0xc, 0x1d, 0x4, 0x98, 0xbf, 0xaa, 0xcf, 0x3e, 0xfb, 0x2c, 0x39, 0xb4, 0xdb, 0xed, 0xe6, 0x6c, 0x92, 0x12, 0x13, 0x13, 0x33, 0xf4, 0x3b, 0x4, 0x72, 0x19, 0xbd, 0x77, 0xef, 0x5e, 0xdc, 0x14, 0x86, 0x13, 0x9, 0x9, 0x9, 0x9c, 0x59, 0x75, 0x9c, 0xd0, 0xab, 0x56, 0xad, 0xe2, 0x7c, 0x97, 0x8e, 0x1f, 0x3f, 0xee, 0xd7, 0x6d, 0x68, 0x7c, 0x7c, 0x3c, 0x21, 0xca, 0xbc, 0x79, 0xf3, 0x20, 0x74, 0xd8, 0x50, 0x58, 0x58, 0xc8, 0x51, 0x6a, 0xdf, 0xbe, 0x7d, 0xe4, 0x91, 0x8d, 0xb, 0x17, 0x2e, 0x70, 0x42, 0xbf, 0xfd, 0xf6, 0xdb, 0xfe, 0x46, 0x24, 0x4f, 0x7b, 0x72, 0xb9, 0x5c, 0x10, 
0x3a, 0xc, 0xe8, 0xec, 0xec, 0xe4, 0x28, 0xb5, 0x6a, 0xd5, 0x2a, 0x4e, 0xf4, 0x99, 0x33, 0x67, 0x92, 0x43, 0x6b, 0x34, 0x9a, 0x21, 0xde, 0xb, 0xfe, 0x1b, 0xf2, 0x2a, 0xc0, 0x1d, 0x1d, 0x1d, 0x10, 0x3a, 0xc, 0x28, 0x2e, 0x2e, 0xe6, 0x8, 0xdd, 0xd9, 0xd9, 0x19, 0xac, 0xef, 0x52, 0x71, 0x71, 0x31, 0x21, 0x28, 0x79, 0x7d, 0x82, 0x6f, 0xbf, 0xfd, 0x16, 0x42, 0x87, 0x3a, 0x57, 0xae, 0x5c, 0xe1, 0x28, 0xe5, 0x5b, 0x47, 0x94, 0x86, 0xc7, 0xe3, 0xe1, 0xbc, 0x61, 0xa5, 0xd5, 0x6a, 0x3d, 0x1e, 0xf, 0xad, 0xd4, 0xd1, 0xe9, 0x74, 0x84, 0x88, 0xf, 0x3c, 0xf0, 0x0, 0x84, 0xe, 0x75, 0x38, 0xf3, 0xe8, 0x23, 0x23, 0x23, 0xed, 0x76, 0x3b, 0xb9, 0x7a, 0x5e, 0xbd, 0x7a, 0x35, 0xe7, 0xbb, 0x54, 0x55, 0x55, 0x45, 0xfe, 0xd4, 0x33, 0x66, 0xcc, 0xa0, 0x5, 0xed, 0xeb, 0xeb, 0x83, 0xd0, 0xa1, 0x4b, 0x53, 0x53, 0x13, 0x47, 0x29, 0xdf, 0x3b, 0xd5, 0x34, 0x7a, 0x7a, 0x7a, 0x38, 0xa1, 0x27, 0x4f, 0x9e, 0x4c, 0xe, 0x2d, 0x8a, 0x62, 0x55, 0x55, 0x15, 0x2d, 0xee, 0xce, 0x9d, 0x3b, 0x21, 0x74, 0x88, 0xe2, 0x74, 0x3a, 0x39, 0x43, 0x75, 0x19, 0x19, 0x19, 0x9c, 0x69, 0x1b, 0x59, 0x59, 0x59, 0xe4, 0xd0, 0x2a, 0x95, 0xaa, 0xa5, 0xa5, 0x85, 0x1c, 0xda, 0x7, 0xed, 0x99, 0xe8, 0xb8, 0x71, 0xe3, 0x14, 0x70, 0xe9, 0x95, 0xf9, 0xa4, 0x30, 0x3b, 0x3b, 0xfb, 0xea, 0xd5, 0xab, 0xe4, 0xe6, 0xbe, 0x1d, 0x2f, 0x69, 0xd3, 0x36, 0x4c, 0x26, 0x53, 0x43, 0x43, 0x3, 0x39, 0xf4, 0xdc, 0xb9, 0x73, 0xd3, 0xd3, 0xd3, 0x39, 0x93, 0xa8, 0x4, 0x41, 0xa0, 0xd5, 0x5a, 0xe4, 0xe7, 0x32, 0x78, 0x52, 0x28, 0x7b, 0x7a, 0x56, 0xab, 0xd5, 0xe4, 0xe, 0x61, 0x2e, 0x58, 0x41, 0x2e, 0x61, 0x7d, 0x98, 0xcd, 0x66, 0x7e, 0xf, 0x90, 0xb7, 0xa0, 0xb5, 0x58, 0x2c, 0xc8, 0xd0, 0x21, 0x47, 0x4e, 0x4e, 0x8e, 0xc7, 0xe3, 0x21, 0x37, 0xf7, 0x4d, 0x7a, 0x26, 0x3f, 0xd8, 0xe3, 0xbc, 0x3, 0x9b, 0x97, 0x97, 0x97, 0x98, 0x98, 0xc8, 0xef, 0x1, 0xf2, 0xb6, 0x18, 0xcc, 0xbd, 0xcd, 0x91, 0xa1, 0xa5, 0xa7, 0xae, 0xae, 0x8e, 0xd3, 0x1b, 0xbe, 0x49, 0xcf, 0xe4, 0x1b, 0xb2, 0x5b, 0x6f, 0xbd, 0x95, 0x1c, 0x7a, 0xf4, 0xe8, 0xd1, 0x12, 0xf6, 0x3, 0xad, 0x68, 0xc9, 0xcc, 0xcc, 0xc4, 0x4d, 0x61, 0x68, 0x91, 0x91, 0x91, 0x41, 0x56, 0x2a, 0x29, 0x29, 0x89, 0xb3, 0x3, 0x2c, 0xf9, 0x9d, 0x11, 0x1f, 0xad, 0xad, 0xad, 0x12, 0xf6, 0xc3, 0x53, 0x4f, 0x3d, 0x45, 0x38, 0x87, 0xe8, 0xe8, 0x68, 0x8, 0x1d, 0x42, 0xec, 0xda, 0xb5, 0x8b, 0xa3, 0xd4, 0x99, 0x33, 0x67, 0xc8, 0xa1, 0x6d, 0x36, 0x1b, 0x27, 0xf4, 0x33, 0xcf, 0x3c, 0x23, 0x6d, 0x57, 0x9c, 0x3c, 0x79, 0x92, 0x76, 0x26, 0x83, 0xbf, 0xb9, 0x8, 0xa1, 0x3, 0x7, 0xf9, 0xcd, 0x67, 0x1f, 0x73, 0xe6, 0xcc, 0x9, 0x7c, 0x46, 0x1c, 0xa0, 0xa7, 0xa7, 0x47, 0xda, 0xde, 0x20, 0x7f, 0xc1, 0x56, 0xae, 0x5c, 0x9, 0xa1, 0x43, 0x82, 0xea, 0xea, 0x6a, 0x8e, 0x52, 0x8d, 0x8d, 0x8d, 0xe4, 0xd0, 0xcd, 0xcd, 0xcd, 0x9c, 0xd0, 0x4b, 0x96, 0x2c, 0x91, 0xbc, 0x37, 0x3c, 0x1e, 0xf, 0xed, 0x19, 0x78, 0x56, 0x56, 0x16, 0x84, 0xe, 0x3e, 0xcc, 0x85, 0x71, 0x99, 0xe9, 0x39, 0x36, 0x36, 0x96, 0x1c, 0x5a, 0xad, 0x56, 0x73, 0x9e, 0xb1, 0xf, 0xc2, 0xf3, 0xcf, 0x3f, 0x4f, 0x38, 0x9f, 0xa8, 0xa8, 0x28, 0xc, 0xdb, 0x5, 0x9f, 0x17, 0x5f, 0x7c, 0x91, 0xf3, 0x70, 0x6e, 0xd7, 0xae, 0x5d, 0xe4, 0xa1, 0xba, 0x6d, 0xdb, 0xb6, 0x91, 0x37, 0x49, 0x11, 0x4, 0x61, 0xfd, 0xfa, 0xf5, 0xd1, 0xd1, 0xd1, 0xcc, 0x27, 0x29, 0xd7, 0xa5, 0xa0, 0xa0, 0x80, 0xd0, 0xca, 0xe9, 0x74, 0xfe, 0xf2, 0xcb, 0x2f, 0x18, 0xb6, 0xb, 0x26, 0xcc, 0x2d, 0x70, 0xde, 0x78, 0xe3, 0xd, 0x72, 0x68, 0x97, 0xcb, 0x35, 0xc4, 0x17, 0xfe, 0x6e, 0x34, 0xae, 0x22, 0x5f, 0xb7, 0x58, 0xad, 0x56, 0xda, 0x59, 0xad, 0x59, 0xb3, 0x6, 0x25, 0x47, 0x30, 0xe1, 0xfc, 0xe2, 0xc7, 0xc7, 0xc7, 0x73, 0xa6, 0x6d, 0x2c, 0x5e, 0xbc, 0x98, 0xf3, 0x5d, 
0x32, 0x99, 0x4c, 0x72, 0x14, 0x1b, 0x3, 0x64, 0x66, 0x66, 0x92, 0xa7, 0xb2, 0xa0, 0xe4, 0x8, 0x5a, 0xb1, 0xc1, 0xf9, 0xc5, 0xff, 0xf2, 0xcb, 0x2f, 0xc9, 0xd3, 0x36, 0x5a, 0x5a, 0x5a, 0x38, 0xab, 0x7b, 0x4d, 0x9c, 0x38, 0x71, 0xe6, 0xcc, 0x99, 0x72, 0x14, 0x1b, 0x3, 0xd0, 0x56, 0xd, 0x6e, 0x69, 0x69, 0x21, 0x67, 0x77, 0x94, 0x1c, 0xc1, 0x4c, 0xcf, 0x1a, 0x8d, 0x86, 0x13, 0x9a, 0xb9, 0x61, 0xe6, 0xef, 0xbf, 0xff, 0x2e, 0x77, 0xe7, 0x1c, 0x3c, 0x78, 0x90, 0x76, 0x6e, 0xd, 0xd, 0xd, 0xc8, 0xd0, 0x41, 0xe0, 0xb5, 0xd7, 0x5e, 0xe3, 0xa4, 0xe7, 0x23, 0x47, 0x8e, 0x90, 0xef, 0x5, 0xcf, 0x9d, 0x3b, 0xf7, 0xf9, 0xe7, 0x9f, 0x93, 0x43, 0x6f, 0xde, 0xbc, 0x79, 0xd2, 0xa4, 0x49, 0x72, 0xf7, 0xf, 0xf9, 0x15, 0x7, 0xa3, 0xd1, 0x88, 0xc, 0x1d, 0x68, 0xda, 0xdb, 0xdb, 0x39, 0x1f, 0x7c, 0xdd, 0xba, 0x75, 0xe4, 0xd0, 0xa2, 0x28, 0x4e, 0x9d, 0x3a, 0x95, 0x1c, 0x3a, 0x2e, 0x2e, 0x8e, 0xb3, 0xc8, 0xb4, 0x5f, 0x8c, 0x1f, 0x3f, 0x9e, 0x70, 0x86, 0x69, 0x69, 0x69, 0xb8, 0x29, 0xc, 0x34, 0x9c, 0x5d, 0x2b, 0xa3, 0xa3, 0xa3, 0x39, 0x2f, 0xee, 0x1f, 0x3d, 0x7a, 0x94, 0xf3, 0x5d, 0xa, 0xe4, 0x32, 0xe3, 0xaf, 0xbc, 0xf2, 0xa, 0xed, 0x24, 0xad, 0x56, 0x2b, 0x84, 0xe, 0x1c, 0xe4, 0xb9, 0xa, 0x3e, 0xf6, 0xef, 0xdf, 0x4f, 0xe, 0xdd, 0xdd, 0xdd, 0xcd, 0xd9, 0x70, 0xe8, 0xfe, 0xfb, 0xef, 0xe7, 0x8c, 0xab, 0xf8, 0xfb, 0x4b, 0x42, 0x7e, 0x15, 0xed, 0xa7, 0x9f, 0x7e, 0x82, 0xd0, 0x1, 0xa2, 0xbf, 0xbf, 0x9f, 0xb3, 0xbe, 0xd6, 0xdd, 0x77, 0xdf, 0xcd, 0x51, 0x6a, 0xc2, 0x84, 0x9, 0x9c, 0xef, 0xd2, 0xf9, 0xf3, 0xe7, 0x3, 0xdc, 0x5d, 0xb4, 0x55, 0x49, 0x17, 0x2d, 0x5a, 0x14, 0x8e, 0x6e, 0xa8, 0x38, 0x2b, 0xcc, 0x2, 0x10, 0x6a, 0x44, 0xa0, 0xb, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x84, 0x6, 0x0, 0x42, 0x3, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0xa1, 0x1, 0x80, 0xd0, 0x0, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x40, 0x68, 0x0, 0x20, 0x34, 0x0, 0x10, 0x1a, 0x0, 0x8, 0xd, 0x0, 0x84, 0x6, 0xca, 0xe7, 0x7f, 0x1, 0x0, 0x0, 0xff, 0xff, 0x1a, 0xd5, 0xb5, 0x9c, 0xcd, 0x97, 0x3e, 0x9f, 0x0, 0x0, 0x0, 0x0, 0x49, 0x45, 0x4e, 0x44, 0xae, 0x42, 0x60, 0x82}
diff --git a/tools/debug/doberman/main.go b/tools/debug/doberman/main.go
index eb8eb447b9..70899f3c81 100644
--- a/tools/debug/doberman/main.go
+++ b/tools/debug/doberman/main.go
@@ -23,7 +23,6 @@ import (
"flag"
"fmt"
"io"
- "io/ioutil"
"os"
"os/exec"
"strings"
@@ -57,7 +56,7 @@ func main() {
}
// write logo
- tf, err := ioutil.TempFile("", "algorand-logo.png")
+ tf, err := os.CreateTemp("", "algorand-logo.png")
if err != nil {
panic(err)
}
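This file and several below apply the Go 1.16 io/ioutil deprecation: ioutil.TempFile becomes os.CreateTemp, ioutil.ReadFile/WriteFile become os.ReadFile/os.WriteFile, and ioutil.ReadAll becomes io.ReadAll. A minimal sketch of the replacement temp-file pattern (hypothetical names; note the returned *os.File is open and must be both closed and removed):

package main

import (
	"fmt"
	"os"
)

func main() {
	// os.CreateTemp replaces ioutil.TempFile as of Go 1.16; a "*" in the
	// pattern, if present, is replaced by a random string.
	tf, err := os.CreateTemp("", "example-*.txt")
	if err != nil {
		panic(err)
	}
	defer os.Remove(tf.Name()) // runs last: delete the file itself
	defer tf.Close()           // runs first: flush and close the handle

	if _, err := tf.WriteString("hello"); err != nil {
		panic(err)
	}
	fmt.Println("wrote", tf.Name())
}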
diff --git a/tools/debug/logfilter/main_test.go b/tools/debug/logfilter/main_test.go
index 9058f611c8..45ab8605fc 100644
--- a/tools/debug/logfilter/main_test.go
+++ b/tools/debug/logfilter/main_test.go
@@ -19,7 +19,6 @@ package main
import (
"bytes"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -48,7 +47,7 @@ func TestLogFilterExamples(t *testing.T) {
for _, exampleFileName := range exampleFiles {
// load the expected result file.
expectedOutFile := strings.Replace(exampleFileName, ".in", ".out.expected", 1)
- expectedOutBytes, err := ioutil.ReadFile(expectedOutFile)
+ expectedOutBytes, err := os.ReadFile(expectedOutFile)
require.NoError(t, err)
expectedErrorCode := 0
if strings.Contains(string(expectedOutBytes), "FAIL") {
diff --git a/tools/network/cloudflare/cloudflare.go b/tools/network/cloudflare/cloudflare.go
index 414f812323..714fb9635b 100644
--- a/tools/network/cloudflare/cloudflare.go
+++ b/tools/network/cloudflare/cloudflare.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"context"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"strings"
)
@@ -170,7 +170,7 @@ func (d *DNS) CreateDNSRecord(ctx context.Context, recordType string, name strin
if !parsedResponse.Success {
request, _ := createDNSRecordRequest(d.zoneID, d.authToken, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to create DNS record. Request url = '%v', body = %s, parsed response : %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -195,7 +195,7 @@ func (d *DNS) CreateSRVRecord(ctx context.Context, name string, target string, t
if !parsedResponse.Success {
request, _ := createSRVRecordRequest(d.zoneID, d.authToken, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to create SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -220,7 +220,7 @@ func (d *DNS) DeleteDNSRecord(ctx context.Context, recordID string) error {
if !parsedResponse.Success {
request, _ := deleteDNSRecordRequest(d.zoneID, d.authToken, recordID)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to delete DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -246,7 +246,7 @@ func (d *DNS) UpdateDNSRecord(ctx context.Context, recordID string, recordType s
if !parsedResponse.Success {
request, _ := updateDNSRecordRequest(d.zoneID, d.authToken, recordID, recordType, name, content, ttl, priority, proxied)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to update DNS record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
@@ -272,7 +272,7 @@ func (d *DNS) UpdateSRVRecord(ctx context.Context, recordID string, name string,
if !parsedResponse.Success {
request, _ := updateSRVRecordRequest(d.zoneID, d.authToken, recordID, name, service, protocol, weight, port, ttl, priority, target)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return fmt.Errorf("failed to update SRV record. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
return nil
@@ -303,7 +303,7 @@ func (c *Cred) GetZones(ctx context.Context) (zones []Zone, err error) {
if !parsedResponse.Success {
request, _ := getZonesRequest(c.authToken)
requestBody, _ := request.GetBody()
- bodyBytes, _ := ioutil.ReadAll(requestBody)
+ bodyBytes, _ := io.ReadAll(requestBody)
return nil, fmt.Errorf("failed to retrieve zone records. Request url = '%v', body = %s, parsedResponse = %#v, response headers = %#v", request.URL, string(bodyBytes), parsedResponse, response.Header)
}
@@ -360,7 +360,7 @@ func (d *DNS) ExportZone(ctx context.Context) (exportedZoneBytes []byte, err err
return nil, err
}
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
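The cloudflare helpers quote the failed request's body in their error messages by rebuilding the request and reading its body back. A self-contained sketch of that GetBody pattern (the URL and payload are made up); net/http populates GetBody automatically for bodies built from in-memory readers, which is what makes a second read possible after the original body was consumed:

package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	payload := []byte(`{"type":"A","name":"example"}`)
	req, err := http.NewRequest(http.MethodPost, "https://api.example.invalid/records", bytes.NewReader(payload))
	if err != nil {
		panic(err)
	}
	// GetBody returns a fresh copy of the body for requests constructed
	// from *bytes.Reader, *bytes.Buffer, or *strings.Reader.
	rc, err := req.GetBody()
	if err != nil {
		panic(err)
	}
	bodyBytes, err := io.ReadAll(rc) // io.ReadAll replaces ioutil.ReadAll
	if err != nil {
		panic(err)
	}
	fmt.Printf("request body for error report: %s\n", bodyBytes)
}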
diff --git a/tools/network/cloudflare/createRecord.go b/tools/network/cloudflare/createRecord.go
index 747dc57ae1..c68747f5bd 100644
--- a/tools/network/cloudflare/createRecord.go
+++ b/tools/network/cloudflare/createRecord.go
@@ -20,7 +20,7 @@ import (
"bytes"
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -144,7 +144,7 @@ type CreateDNSRecordResult struct {
// parseCreateDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseCreateDNSRecordResponse(response *http.Response) (*CreateDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/deleteRecord.go b/tools/network/cloudflare/deleteRecord.go
index 9770be8988..f0bf90ce5c 100644
--- a/tools/network/cloudflare/deleteRecord.go
+++ b/tools/network/cloudflare/deleteRecord.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -55,7 +55,7 @@ type DeleteDNSRecordResult struct {
// ParseDeleteDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseDeleteDNSRecordResponse(response *http.Response) (*DeleteDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/listRecords.go b/tools/network/cloudflare/listRecords.go
index 263e8adf8d..1617b61188 100644
--- a/tools/network/cloudflare/listRecords.go
+++ b/tools/network/cloudflare/listRecords.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -120,7 +120,7 @@ type DNSRecordResponseEntry struct {
// parseListDNSRecordResponse parses the response that was received as a result of a ListDNSRecordRequest
func parseListDNSRecordResponse(response *http.Response) (*ListDNSRecordResponse, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/network/cloudflare/zones.go b/tools/network/cloudflare/zones.go
index d73829ea5f..f5aa4b9ac2 100644
--- a/tools/network/cloudflare/zones.go
+++ b/tools/network/cloudflare/zones.go
@@ -19,7 +19,7 @@ package cloudflare
import (
"encoding/json"
"fmt"
- "io/ioutil"
+ "io"
"net/http"
"net/url"
)
@@ -71,7 +71,7 @@ type GetZonesResultItem struct {
func parseGetZonesResponse(response *http.Response) (*GetZonesResult, error) {
defer response.Body.Close()
- body, err := ioutil.ReadAll(response.Body)
+ body, err := io.ReadAll(response.Body)
if err != nil {
return nil, err
}
diff --git a/tools/teal/algotmpl/main.go b/tools/teal/algotmpl/main.go
index 2d24841a57..7568dda92f 100644
--- a/tools/teal/algotmpl/main.go
+++ b/tools/teal/algotmpl/main.go
@@ -20,7 +20,6 @@ package main
import (
"fmt"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -107,7 +106,7 @@ type param struct {
}
func initCommandsFromDir(dirname string) error {
- files, err := ioutil.ReadDir(dirname)
+ files, err := os.ReadDir(dirname)
if err != nil {
return err
}
@@ -137,7 +136,7 @@ func initCommandsFromDir(dirname string) error {
if err != nil {
return err
}
- data, err := ioutil.ReadFile(fullpath)
+ data, err := os.ReadFile(fullpath)
if err != nil {
return err
}
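One ioutil migration here is not purely mechanical: ioutil.ReadDir returned []os.FileInfo, while os.ReadDir returns the cheaper []os.DirEntry (it defers the per-file stat), so call sites that need size, mode, or mtime must go through the Info() bridge. A sketch:

package main

import (
	"fmt"
	"os"
)

func main() {
	entries, err := os.ReadDir(".") // []os.DirEntry, not []os.FileInfo
	if err != nil {
		panic(err)
	}
	for _, e := range entries {
		// Name and IsDir are available directly on the DirEntry...
		fmt.Println(e.Name(), e.IsDir())
		// ...but size/mode/mtime require the explicit stat bridge.
		info, err := e.Info()
		if err != nil {
			continue
		}
		fmt.Println("  size:", info.Size())
	}
}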
diff --git a/tools/teal/dkey/dsign/main.go b/tools/teal/dkey/dsign/main.go
index ff8a6067bb..6f380d8a0b 100644
--- a/tools/teal/dkey/dsign/main.go
+++ b/tools/teal/dkey/dsign/main.go
@@ -23,7 +23,7 @@ package main
import (
"encoding/base64"
"fmt"
- "io/ioutil"
+ "io"
"os"
"github.com/algorand/go-algorand/crypto"
@@ -47,7 +47,7 @@ func main() {
keyfname := os.Args[1]
lsigfname := os.Args[2]
- kdata, err := ioutil.ReadFile(keyfname)
+ kdata, err := os.ReadFile(keyfname)
failFast(err)
var seed crypto.Seed
copy(seed[:], kdata)
@@ -56,10 +56,10 @@ func main() {
if len(os.Args) == 4 {
// In this mode, interpret lsig-file as raw program bytes and produce a signature
// over the data file
- pdata, err := ioutil.ReadFile(lsigfname)
+ pdata, err := os.ReadFile(lsigfname)
failFast(err)
- ddata, err := ioutil.ReadFile(os.Args[3])
+ ddata, err := os.ReadFile(os.Args[3])
failFast(err)
dsig := sec.Sign(logic.Msg{
@@ -71,13 +71,13 @@ func main() {
} else {
// In this mode, interpret lsig-file as a LogicSig struct and sign the
// txid of the transaction passed over stdin
- pdata, err := ioutil.ReadFile(lsigfname)
+ pdata, err := os.ReadFile(lsigfname)
failFast(err)
var lsig transactions.LogicSig
err = protocol.Decode(pdata, &lsig)
failFast(err)
- txdata, err := ioutil.ReadAll(os.Stdin)
+ txdata, err := io.ReadAll(os.Stdin)
failFast(err)
var txn transactions.SignedTxn
err = protocol.Decode(txdata, &txn)
diff --git a/tools/teal/tealcut/main.go b/tools/teal/tealcut/main.go
index 96958c42af..7615cdc979 100644
--- a/tools/teal/tealcut/main.go
+++ b/tools/teal/tealcut/main.go
@@ -22,7 +22,6 @@ import (
"encoding/binary"
"encoding/hex"
"fmt"
- "io/ioutil"
"os"
"strconv"
"strings"
@@ -56,7 +55,7 @@ func main() {
}
var splitbytes [8]byte
binary.BigEndian.PutUint64(splitbytes[:], splitnum)
- data, err := ioutil.ReadFile(os.Args[1])
+ data, err := os.ReadFile(os.Args[1])
if err != nil {
panic(err)
}
diff --git a/util/codecs/json.go b/util/codecs/json.go
index 071c2f30c6..2d2c21134e 100644
--- a/util/codecs/json.go
+++ b/util/codecs/json.go
@@ -21,7 +21,6 @@ import (
"encoding/json"
"fmt"
"io"
- "io/ioutil"
"os"
"reflect"
"strings"
@@ -79,7 +78,7 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
// When done, ensure last value line doesn't include comma
// Write string array to file.
- file, err := ioutil.TempFile("", "encsndv")
+ file, err := os.CreateTemp("", "encsndv")
if err != nil {
return err
}
@@ -94,7 +93,7 @@ func SaveNonDefaultValuesToFile(filename string, object, defaultObject interface
}
// Read lines from encoded file into string array
- content, err := ioutil.ReadFile(name)
+ content, err := os.ReadFile(name)
if err != nil {
return err
}
diff --git a/util/io.go b/util/io.go
index 081c1d5681..43068f39c9 100644
--- a/util/io.go
+++ b/util/io.go
@@ -19,7 +19,6 @@ package util
import (
"fmt"
"io"
- "io/ioutil"
"os"
"path/filepath"
"strings"
@@ -83,7 +82,7 @@ func ExeDir() (string, error) {
// GetFirstLineFromFile retrieves the first line of the specified file.
func GetFirstLineFromFile(netFile string) (string, error) {
- addrStr, err := ioutil.ReadFile(netFile)
+ addrStr, err := os.ReadFile(netFile)
if err != nil {
return "", err
}
@@ -130,7 +129,7 @@ func copyFolder(source string, dest string, info os.FileInfo, includeFilter Incl
return fmt.Errorf("error creating destination folder: %v", err)
}
- contents, err := ioutil.ReadDir(source)
+ contents, err := os.ReadDir(source)
if err != nil {
return err
}
diff --git a/util/metrics/metrics_test.go b/util/metrics/metrics_test.go
index fddb9eda60..2e2828b1fb 100644
--- a/util/metrics/metrics_test.go
+++ b/util/metrics/metrics_test.go
@@ -18,7 +18,7 @@ package metrics
import (
"fmt"
- "io/ioutil"
+ "io"
"net"
"net/http"
"strings"
@@ -65,7 +65,7 @@ func (p *MetricTest) createListener(endpoint string) int {
func (p *MetricTest) testMetricsHandler(w http.ResponseWriter, r *http.Request) {
// read the entire request:
- body, err := ioutil.ReadAll(r.Body)
+ body, err := io.ReadAll(r.Body)
if err != nil {
return
}
diff --git a/util/metrics/tagcounter.go b/util/metrics/tagcounter.go
index d4de0d53cc..80689e7a8b 100644
--- a/util/metrics/tagcounter.go
+++ b/util/metrics/tagcounter.go
@@ -104,16 +104,13 @@ func (tc *TagCounter) Add(tag string, val uint64) {
var st []uint64
if len(tc.storage) > 0 {
st = tc.storage[len(tc.storage)-1]
- //fmt.Printf("new tag %v, old block\n", tag)
}
if tc.storagePos > (len(st) - 1) {
- //fmt.Printf("new tag %v, new block\n", tag)
st = make([]uint64, 16)
tc.storagePos = 0
tc.storage = append(tc.storage, st)
}
newtags[tag] = &(st[tc.storagePos])
- //fmt.Printf("tag %v = %p\n", tag, newtags[tag])
tc.storagePos++
tc.tags = newtags
tc.tagptr.Store(newtags)
@@ -155,7 +152,8 @@ func (tc *TagCounter) WriteMetric(buf *strings.Builder, parentLabels string) {
buf.WriteRune('}')
}
buf.WriteRune(' ')
- buf.WriteString(strconv.FormatUint(*tagcount, 10))
+ count := atomic.LoadUint64(tagcount)
+ buf.WriteString(strconv.FormatUint(count, 10))
buf.WriteRune('\n')
}
}
@@ -179,6 +177,7 @@ func (tc *TagCounter) AddMetric(values map[string]float64) {
} else {
name = tc.Name + "_" + tag
}
- values[sanitizeTelemetryName(name)] = float64(*tagcount)
+ count := atomic.LoadUint64(tagcount)
+ values[sanitizeTelemetryName(name)] = float64(count)
}
}
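The WriteMetric/AddMetric fix above matters because the per-tag counters are updated concurrently (presumably via atomic.AddUint64 in Add): reading them with a plain pointer dereference like *tagcount is a data race that the race detector flags, even on platforms where the load happens to be atomic in practice. A standalone illustration of the paired Add/Load discipline, with a hypothetical counter:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

func main() {
	var counter uint64
	var wg sync.WaitGroup
	for i := 0; i < 4; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for j := 0; j < 1000; j++ {
				atomic.AddUint64(&counter, 1)
			}
		}()
	}
	// Reading counter directly here would race with the writers, just as
	// *tagcount did in WriteMetric; atomic.LoadUint64 pairs correctly
	// with atomic.AddUint64.
	for i := 0; i < 3; i++ {
		fmt.Println("in-flight count:", atomic.LoadUint64(&counter))
	}
	wg.Wait()
	fmt.Println("final count:", atomic.LoadUint64(&counter))
}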
diff --git a/util/sleep_linux_32.go b/util/sleep_linux_32.go
index 1d155fac03..50a0e696c2 100644
--- a/util/sleep_linux_32.go
+++ b/util/sleep_linux_32.go
@@ -31,5 +31,5 @@ func NanoSleep(d time.Duration) {
Nsec: int32(d.Nanoseconds() % time.Second.Nanoseconds()),
Sec: int32(d.Nanoseconds() / time.Second.Nanoseconds()),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
}
diff --git a/util/sleep_linux_64.go b/util/sleep_linux_64.go
index 2897ceaa17..b2f7a69dbe 100644
--- a/util/sleep_linux_64.go
+++ b/util/sleep_linux_64.go
@@ -30,5 +30,5 @@ func NanoSleep(d time.Duration) {
Nsec: d.Nanoseconds() % time.Second.Nanoseconds(),
Sec: d.Nanoseconds() / time.Second.Nanoseconds(),
}
- syscall.Nanosleep(timeSpec, nil) // nolint:errcheck
+ syscall.Nanosleep(timeSpec, nil) // nolint:errcheck // ignoring error
}
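The two sleep shims above differ only in the integer width of syscall.Timespec (int32 on 32-bit linux, int64 on 64-bit); both perform the same second/nanosecond split of a time.Duration. A tiny sketch of that split, showing the arithmetic in isolation:

package main

import (
	"fmt"
	"time"
)

func main() {
	d := 1500 * time.Millisecond
	// Same decomposition NanoSleep performs before calling
	// syscall.Nanosleep: whole seconds, then the remainder in ns.
	sec := d.Nanoseconds() / time.Second.Nanoseconds()
	nsec := d.Nanoseconds() % time.Second.Nanoseconds()
	fmt.Printf("%v -> Sec=%d Nsec=%d\n", d, sec, nsec)
}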
diff --git a/util/tokens/tokens.go b/util/tokens/tokens.go
index 7b6d33d472..930f030bbe 100644
--- a/util/tokens/tokens.go
+++ b/util/tokens/tokens.go
@@ -19,7 +19,7 @@ package tokens
import (
"crypto/rand"
"fmt"
- "io/ioutil"
+ "os"
"path/filepath"
"github.com/algorand/go-algorand/util"
@@ -59,7 +59,7 @@ func GetAndValidateAPIToken(dataDir, tokenFilename string) (string, error) {
// writeAPITokenToDisk persists the APIToken to the datadir
func writeAPITokenToDisk(dataDir, tokenFilename, apiToken string) error {
filepath := tokenFilepath(dataDir, tokenFilename)
- return ioutil.WriteFile(filepath, []byte(apiToken), 0644)
+ return os.WriteFile(filepath, []byte(apiToken), 0644)
}
// GenerateAPIToken writes a cryptographically secure APIToken to disk