From e2e4d4418d3c105aa3e5e0725a6825c268a07480 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 20:57:02 -0500 Subject: [PATCH 01/85] basic python setup and github attempt with blackbox --- .github/workflows/build.yml | 86 +++++++++++++++++++++++++++++++++++++ .gitignore | 10 +++++ Makefile | 20 +++++++++ README.md | 1 + pyproject.toml | 7 +++ requirements.txt | 1 + setup.py | 25 +++++++++++ tests/__init__.py | 0 tests/blackbox_test.py | 3 ++ 9 files changed, 153 insertions(+) create mode 100644 .github/workflows/build.yml create mode 100644 Makefile create mode 100644 pyproject.toml create mode 100644 requirements.txt create mode 100644 setup.py create mode 100644 tests/__init__.py create mode 100644 tests/blackbox_test.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..01d96897 --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,86 @@ +name: "Build workflow" +on: + pull_request: + push: + tags: + - v** + branches: + - main + +env: + CONFIG: dev + GENESIS: genesis/dev/genesis.json + +jobs: + build-test: + runs-on: ubuntu-20.04 + container: python:${{ matrix.python }} + strategy: + matrix: + python: ['3.10'] + steps: + - run: python3 --version + - name: Check out code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install pip dependencies + run: make pip + - name: Build and Test + run: make build-and-test + run-integration-tests: + runs-on: ubuntu-20.04 + steps: + - name: Check out code + uses: actions/checkout@v2 + with: + fetch-depth: 0 + - name: Install required os level applications + run: | + sudo apt update -y + sudo apt install -y curl git nodejs python-is-python3 python3-pip + sudo apt -y install ca-certificates curl gnupg lsb-release + sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg + sudo echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ + $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt -y install docker-ce docker-ce-cli containerd.io + - name: Setup docker compose + run: | + sudo curl -L "https://github.com/docker/compose/releases/download/1.29.2/docker-compose-$(uname -s)-$(uname -m)" -o /usr/local/bin/docker-compose + sudo chmod +x /usr/local/bin/docker-compose + sudo ln -s /usr/local/bin/docker-compose /usr/bin/docker-compose + docker-compose --version + - name: Check cache for Docker layers + uses: satackey/action-docker-layer-caching@v0.0.11 + # Ignore the failure of a step and avoid terminating the job. 
+ continue-on-error: true + with: + key: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.CONFIG, env.GENESIS) }}-{hash} + restore-keys: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.CONFIG, env.GENESIS) }}- + - name: Create sandbox + uses: lucasvanmol/algorand-sandbox-action@v1 + with: + config: ${{ env.CONFIG }} + - name: Setup integration test environment + run: make pip build # integration-env-up + - name: Run integration tests + run: make blackbox + # upload-to-pypi: + # runs-on: ubuntu-20.04 + # container: python:3.9 + # needs: ['build-test'] + # if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags') }} + # steps: + # - name: Check out code + # uses: actions/checkout@v2 + # with: + # fetch-depth: 0 + # - name: Install dependencies + # run: pip install wheel + # - name: Build package + # run: python setup.py sdist bdist_wheel + # - name: Release + # uses: pypa/gh-action-pypi-publish@release/v1 + # with: + # user: __token__ + # password: ${{ secrets.PYPI_API_TOKEN }} diff --git a/.gitignore b/.gitignore index b6e47617..a069990d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,3 +1,13 @@ +##### graviton specific ignores ##### + +# Integration tests +.sandbox + +# Emacs detritus +*~ + +##### github recommends for Python ##### + # Byte-compiled / optimized / DLL files __pycache__/ *.py[cod] diff --git a/Makefile b/Makefile new file mode 100644 index 00000000..592193a8 --- /dev/null +++ b/Makefile @@ -0,0 +1,20 @@ +# Github Actions + +env-up: + bash -x .sandbox/sandbox up dev + +env-down: + .sandbox/sandbox down dev + +# Build + +pip: + pip install -r requirements.txt + pip install -e . + + +build-and-test: + pytest tests/blackbox_test.py + +blackbox: + echo "hello blackbox!" diff --git a/README.md b/README.md index 987d1215..fddc6076 100644 --- a/README.md +++ b/README.md @@ -1,2 +1,3 @@ # graviton + verify your TEAL program by experiment and observation diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..53c55f08 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,7 @@ +[build-system] +requires = ["setuptools", "wheel"] +build-backend = "setuptools.build_meta" + +[metadata] +name = "graviton" +version = "0.0.1" diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 00000000..4f6bf643 --- /dev/null +++ b/requirements.txt @@ -0,0 +1 @@ +pytest==7.1.1 diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..e4c636bf --- /dev/null +++ b/setup.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python3 + +import setuptools + +with open("README.md", "r") as fh: + long_description = fh.read() + +setuptools.setup( + name="graviton", + version="0.0.1", + author="Algorand", + author_email="pypiservice@algorand.com", + description="verify your TEAL program by experiment and observation", + long_description=long_description, + long_description_content_type="text/markdown", + url="https://github.com/algorand/graviton", + packages=setuptools.find_packages(), + install_requires=["py-algorand-sdk"], + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + python_requires=">=3.10", +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/blackbox_test.py b/tests/blackbox_test.py new file mode 100644 index 00000000..697d490f --- /dev/null +++ b/tests/blackbox_test.py @@ -0,0 +1,3 @@ +def test_noop(): + assert True, "oh no" + From 
a9bc043a4adc708c9bdd4ed4192ed18d50f0fb25 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 21:01:57 -0500 Subject: [PATCH 02/85] try again with what I assumed was a superfluous apt update --- .github/workflows/build.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 01d96897..12433596 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -43,6 +43,7 @@ jobs: sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg sudo echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ $(lsb_release -cs) stable" | sudo tee /etc/apt/sources.list.d/docker.list > /dev/null + sudo apt update sudo apt -y install docker-ce docker-ce-cli containerd.io - name: Setup docker compose run: | From 9ad712d1f4ca927ea852c0b9aeaba95bf6733195 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 21:13:25 -0500 Subject: [PATCH 03/85] sandbox smoke test --- .github/workflows/build.yml | 23 ++--------------------- Makefile | 2 ++ 2 files changed, 4 insertions(+), 21 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 12433596..54b3e85b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -63,25 +63,6 @@ jobs: with: config: ${{ env.CONFIG }} - name: Setup integration test environment - run: make pip build # integration-env-up + run: make pip build-and-test - name: Run integration tests - run: make blackbox - # upload-to-pypi: - # runs-on: ubuntu-20.04 - # container: python:3.9 - # needs: ['build-test'] - # if: ${{ github.event_name == 'push' && startsWith(github.ref, 'refs/tags') }} - # steps: - # - name: Check out code - # uses: actions/checkout@v2 - # with: - # fetch-depth: 0 - # - name: Install dependencies - # run: pip install wheel - # - name: Build package - # run: python setup.py sdist bdist_wheel - # - name: Release - # uses: pypa/gh-action-pypi-publish@release/v1 - # with: - # user: __token__ - # password: ${{ secrets.PYPI_API_TOKEN }} + run: make blackbox \ No newline at end of file diff --git a/Makefile b/Makefile index 592193a8..7653eb2a 100644 --- a/Makefile +++ b/Makefile @@ -18,3 +18,5 @@ build-and-test: blackbox: echo "hello blackbox!" 
+ ls + ./sandbox test From 6d8272afdb8598d153c0afb87e6d49ed89e74df5 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 21:47:07 -0500 Subject: [PATCH 04/85] teal files for blackbox tests --- blackbox/teal/app_exp.teal | 15 ++++++++ blackbox/teal/app_oldfac.teal | 34 +++++++++++++++++++ blackbox/teal/app_slow_fibonacci.teal | 41 ++++++++++++++++++++++ blackbox/teal/app_square.teal | 18 ++++++++++ blackbox/teal/app_square_byref.teal | 25 ++++++++++++++ blackbox/teal/app_string_mult.teal | 47 ++++++++++++++++++++++++++ blackbox/teal/app_swap.teal | 31 +++++++++++++++++ blackbox/teal/lsig_exp.teal | 10 ++++++ blackbox/teal/lsig_oldfac.teal | 29 ++++++++++++++++ blackbox/teal/lsig_slow_fibonacci.teal | 36 ++++++++++++++++++++ blackbox/teal/lsig_square.teal | 13 +++++++ blackbox/teal/lsig_square_byref.teal | 20 +++++++++++ blackbox/teal/lsig_string_mult.teal | 43 +++++++++++++++++++++++ blackbox/teal/lsig_swap.teal | 26 ++++++++++++++ 14 files changed, 388 insertions(+) create mode 100644 blackbox/teal/app_exp.teal create mode 100644 blackbox/teal/app_oldfac.teal create mode 100644 blackbox/teal/app_slow_fibonacci.teal create mode 100644 blackbox/teal/app_square.teal create mode 100644 blackbox/teal/app_square_byref.teal create mode 100644 blackbox/teal/app_string_mult.teal create mode 100644 blackbox/teal/app_swap.teal create mode 100644 blackbox/teal/lsig_exp.teal create mode 100644 blackbox/teal/lsig_oldfac.teal create mode 100644 blackbox/teal/lsig_slow_fibonacci.teal create mode 100644 blackbox/teal/lsig_square.teal create mode 100644 blackbox/teal/lsig_square_byref.teal create mode 100644 blackbox/teal/lsig_string_mult.teal create mode 100644 blackbox/teal/lsig_swap.teal diff --git a/blackbox/teal/app_exp.teal b/blackbox/teal/app_exp.teal new file mode 100644 index 00000000..6685b3a1 --- /dev/null +++ b/blackbox/teal/app_exp.teal @@ -0,0 +1,15 @@ +#pragma version 6 +callsub exp_0 +store 0 +load 0 +itob +log +load 0 +return + +// exp +exp_0: +pushint 2 // 2 +pushint 10 // 10 +exp +retsub \ No newline at end of file diff --git a/blackbox/teal/app_oldfac.teal b/blackbox/teal/app_oldfac.teal new file mode 100644 index 00000000..9ddf7619 --- /dev/null +++ b/blackbox/teal/app_oldfac.teal @@ -0,0 +1,34 @@ +#pragma version 6 +intcblock 1 +txna ApplicationArgs 0 +btoi +callsub oldfac_0 +store 1 +load 1 +itob +log +load 1 +return + +// oldfac +oldfac_0: +store 0 +load 0 +pushint 2 // 2 +< +bnz oldfac_0_l2 +load 0 +load 0 +intc_0 // 1 +- +load 0 +swap +callsub oldfac_0 +swap +store 0 +* +b oldfac_0_l3 +oldfac_0_l2: +intc_0 // 1 +oldfac_0_l3: +retsub \ No newline at end of file diff --git a/blackbox/teal/app_slow_fibonacci.teal b/blackbox/teal/app_slow_fibonacci.teal new file mode 100644 index 00000000..34f721f7 --- /dev/null +++ b/blackbox/teal/app_slow_fibonacci.teal @@ -0,0 +1,41 @@ +#pragma version 6 +intcblock 1 +txna ApplicationArgs 0 +btoi +callsub slowfibonacci_0 +store 1 +load 1 +itob +log +load 1 +return + +// slow_fibonacci +slowfibonacci_0: +store 0 +load 0 +intc_0 // 1 +<= +bnz slowfibonacci_0_l2 +load 0 +pushint 2 // 2 +- +load 0 +swap +callsub slowfibonacci_0 +swap +store 0 +load 0 +intc_0 // 1 +- +load 0 +swap +callsub slowfibonacci_0 +swap +store 0 ++ +b slowfibonacci_0_l3 +slowfibonacci_0_l2: +load 0 +slowfibonacci_0_l3: +retsub \ No newline at end of file diff --git a/blackbox/teal/app_square.teal b/blackbox/teal/app_square.teal new file mode 100644 index 00000000..268e7ed7 --- /dev/null +++ b/blackbox/teal/app_square.teal @@ -0,0 +1,18 @@ +#pragma version 6 +txna 
ApplicationArgs 0 +btoi +callsub square_0 +store 1 +load 1 +itob +log +load 1 +return + +// square +square_0: +store 0 +load 0 +pushint 2 // 2 +exp +retsub \ No newline at end of file diff --git a/blackbox/teal/app_square_byref.teal b/blackbox/teal/app_square_byref.teal new file mode 100644 index 00000000..d8678132 --- /dev/null +++ b/blackbox/teal/app_square_byref.teal @@ -0,0 +1,25 @@ +#pragma version 6 +txna ApplicationArgs 0 +btoi +store 2 +pushint 2 // 2 +callsub squarebyref_0 +pushint 1337 // 1337 +store 1 +load 1 +itob +log +load 1 +return + +// square_byref +squarebyref_0: +store 0 +load 0 +load 0 +loads +load 0 +loads +* +stores +retsub \ No newline at end of file diff --git a/blackbox/teal/app_string_mult.teal b/blackbox/teal/app_string_mult.teal new file mode 100644 index 00000000..d358d447 --- /dev/null +++ b/blackbox/teal/app_string_mult.teal @@ -0,0 +1,47 @@ +#pragma version 6 +intcblock 1 +txna ApplicationArgs 0 +store 5 +pushint 5 // 5 +txna ApplicationArgs 1 +btoi +callsub stringmult_0 +store 4 +load 4 +log +load 4 +len +return + +// string_mult +stringmult_0: +store 1 +store 0 +intc_0 // 1 +store 2 +load 0 +loads +store 3 +load 0 +pushbytes 0x // "" +stores +stringmult_0_l1: +load 2 +load 1 +<= +bz stringmult_0_l3 +load 0 +load 0 +loads +load 3 +concat +stores +load 2 +intc_0 // 1 ++ +store 2 +b stringmult_0_l1 +stringmult_0_l3: +load 0 +loads +retsub \ No newline at end of file diff --git a/blackbox/teal/app_swap.teal b/blackbox/teal/app_swap.teal new file mode 100644 index 00000000..a450781b --- /dev/null +++ b/blackbox/teal/app_swap.teal @@ -0,0 +1,31 @@ +#pragma version 6 +txna ApplicationArgs 0 +store 4 +txna ApplicationArgs 1 +store 5 +pushint 4 // 4 +pushint 5 // 5 +callsub swap_0 +pushint 1337 // 1337 +store 3 +load 3 +itob +log +load 3 +return + +// swap +swap_0: +store 1 +store 0 +load 0 +loads +store 2 +load 0 +load 1 +loads +stores +load 1 +load 2 +stores +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_exp.teal b/blackbox/teal/lsig_exp.teal new file mode 100644 index 00000000..e14669bf --- /dev/null +++ b/blackbox/teal/lsig_exp.teal @@ -0,0 +1,10 @@ +#pragma version 6 +callsub exp_0 +return + +// exp +exp_0: +pushint 2 // 2 +pushint 10 // 10 +exp +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_oldfac.teal b/blackbox/teal/lsig_oldfac.teal new file mode 100644 index 00000000..ed8ec0bf --- /dev/null +++ b/blackbox/teal/lsig_oldfac.teal @@ -0,0 +1,29 @@ +#pragma version 6 +intcblock 1 +arg 0 +btoi +callsub oldfac_0 +return + +// oldfac +oldfac_0: +store 0 +load 0 +pushint 2 // 2 +< +bnz oldfac_0_l2 +load 0 +load 0 +intc_0 // 1 +- +load 0 +swap +callsub oldfac_0 +swap +store 0 +* +b oldfac_0_l3 +oldfac_0_l2: +intc_0 // 1 +oldfac_0_l3: +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_slow_fibonacci.teal b/blackbox/teal/lsig_slow_fibonacci.teal new file mode 100644 index 00000000..a1d8df8e --- /dev/null +++ b/blackbox/teal/lsig_slow_fibonacci.teal @@ -0,0 +1,36 @@ +#pragma version 6 +intcblock 1 +arg 0 +btoi +callsub slowfibonacci_0 +return + +// slow_fibonacci +slowfibonacci_0: +store 0 +load 0 +intc_0 // 1 +<= +bnz slowfibonacci_0_l2 +load 0 +pushint 2 // 2 +- +load 0 +swap +callsub slowfibonacci_0 +swap +store 0 +load 0 +intc_0 // 1 +- +load 0 +swap +callsub slowfibonacci_0 +swap +store 0 ++ +b slowfibonacci_0_l3 +slowfibonacci_0_l2: +load 0 +slowfibonacci_0_l3: +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_square.teal b/blackbox/teal/lsig_square.teal new file mode 100644 index 
00000000..a87bea8a --- /dev/null +++ b/blackbox/teal/lsig_square.teal @@ -0,0 +1,13 @@ +#pragma version 6 +arg 0 +btoi +callsub square_0 +return + +// square +square_0: +store 0 +load 0 +pushint 2 // 2 +exp +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_square_byref.teal b/blackbox/teal/lsig_square_byref.teal new file mode 100644 index 00000000..d3f1ad2a --- /dev/null +++ b/blackbox/teal/lsig_square_byref.teal @@ -0,0 +1,20 @@ +#pragma version 6 +arg 0 +btoi +store 0 +pushint 0 // 0 +callsub squarebyref_0 +pushint 1337 // 1337 +return + +// square_byref +squarebyref_0: +store 1 +load 1 +load 1 +loads +load 1 +loads +* +stores +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_string_mult.teal b/blackbox/teal/lsig_string_mult.teal new file mode 100644 index 00000000..bfb4f56a --- /dev/null +++ b/blackbox/teal/lsig_string_mult.teal @@ -0,0 +1,43 @@ +#pragma version 6 +intcblock 1 +arg 0 +store 0 +pushint 0 // 0 +arg 1 +btoi +callsub stringmult_0 +len +return + +// string_mult +stringmult_0: +store 2 +store 1 +intc_0 // 1 +store 3 +load 1 +loads +store 4 +load 1 +pushbytes 0x // "" +stores +stringmult_0_l1: +load 3 +load 2 +<= +bz stringmult_0_l3 +load 1 +load 1 +loads +load 4 +concat +stores +load 3 +intc_0 // 1 ++ +store 3 +b stringmult_0_l1 +stringmult_0_l3: +load 1 +loads +retsub \ No newline at end of file diff --git a/blackbox/teal/lsig_swap.teal b/blackbox/teal/lsig_swap.teal new file mode 100644 index 00000000..1464e8a5 --- /dev/null +++ b/blackbox/teal/lsig_swap.teal @@ -0,0 +1,26 @@ +#pragma version 6 +arg 0 +store 0 +arg 1 +store 1 +pushint 0 // 0 +pushint 1 // 1 +callsub swap_0 +pushint 1337 // 1337 +return + +// swap +swap_0: +store 3 +store 2 +load 2 +loads +store 4 +load 2 +load 3 +loads +stores +load 3 +load 4 +stores +retsub \ No newline at end of file From 882d186c3b6244b22d8e06e5b2e463b68e0d1f6d Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 21:56:19 -0500 Subject: [PATCH 05/85] integration and unit test stubs - can we pass git actions? --- .gitignore | 3 + Makefile | 3 +- README.md | 400 ++++++++++++++- blackbox/__init__.py | 0 tests/clients.py | 24 + tests/integration_test.py | 607 +++++++++++++++++++++++ tests/{blackbox_test.py => unit_test.py} | 0 7 files changed, 1034 insertions(+), 3 deletions(-) create mode 100644 blackbox/__init__.py create mode 100644 tests/clients.py create mode 100644 tests/integration_test.py rename tests/{blackbox_test.py => unit_test.py} (100%) diff --git a/.gitignore b/.gitignore index a069990d..b29ff5ad 100644 --- a/.gitignore +++ b/.gitignore @@ -3,6 +3,9 @@ # Integration tests .sandbox +# Comma Seperated Value reports +*.csv + # Emacs detritus *~ diff --git a/Makefile b/Makefile index 7653eb2a..20b6e2f0 100644 --- a/Makefile +++ b/Makefile @@ -14,9 +14,10 @@ pip: build-and-test: - pytest tests/blackbox_test.py + pytest tests/unit_test.py blackbox: echo "hello blackbox!" ls ./sandbox test + pytest tests/integration_test.py diff --git a/README.md b/README.md index fddc6076..9d9a779a 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,399 @@ -# graviton +# TEAL Blackbox Toolkit: Program Reporting and Testing via Dry Runs + +**NOTE: to get math formulas to render here using Chrome, add the [xhub extension](https://chrome.google.com/webstore/detail/xhub/anidddebgkllnnnnjfkmjcaallemhjee/related) and reload** + +## Blackbox Testing Howto + +### What is TEAL Blackbox Testing? 
+ +TEAL Blackbox Testing lets you treat your TEAL program as a black box that for every received input produces an output and other observable effects. You can create reports that summarize those effects, and turn the _reports_ into _program invariant conjectures_ which you then check with _sequence assertions_. + +### Why Blackbox Testing? + +Here are some use cases: + +* by allowing you to assert that certain invariants hold over a large set of inputs you gain greater confidence that your TEAL programs and AVM smart contracts work as designed +* when tweaking, refactoring or optimizing your TEAL source, ensure that no regressions have occured +* allows AVM developers to practice the art of TTDD (TEAL Test Driven Development) + +## Simple TEAL Blackbox Toolkit Example: Program for $`x^2`$ + +Consider the following [TEAL program](https://github.com/algorand/py-algorand-sdk/blob/23c21170cfb19652d5da854e499dca47eabb20e8/x/blackbox/teal/lsig_square.teal) that purportedly computes $`x^2`$: + +```plain +#pragma version 6 +arg 0 +btoi +callsub square_0 +return + +// square +square_0: +store 0 +load 0 +pushint 2 // 2 +exp +retsub +``` + +You'd like to write some unit tests to validate that it computes what you think it should, and also make **assertions** about the: + +* program's opcode cost +* program's stack +* stack's height +* scratch variables +* final log message (this is especially useful for [ABI-compliant programs](https://developer.algorand.org/docs/get-details/dapps/smart-contracts/ABI/)) +* status (**PASS**, **REJECT** or _erroring_) +* error conditions that are and aren't encountered + +Even better, before making fine-grained assertions you'd like to get a sense of what the program is doing on a large set of inputs so you can discover program invariants. The TEAL Blackbox Toolkit's recommended approach for enabling these goals is to: + +* start by making basic assertions and validate them using dry runs (see "**Basic Assertions**" section below) +* execute the program on a run-sequence of inputs and explore the results (see "**EDRA: Exploratory Dry Run Analysis**" section below) +* create invariants for the entire run-sequence and assert that the invariants hold (see "**Advanced: Asserting Invariants on a Dry Run Sequence**" section below) + +> Becoming a TEAL Blackbox Toolkit Ninja involves 10 steps as described below +### Dry Run Environment Setup + +**STEP 1**. Start with a running local node and make note of Algod's port number (for our [standard sandbox](https://github.com/algorand/sandbox) this is `4001`) + +**STEP 2**. Set the `ALGOD_PORT` value in [x/testnet.py](https://github.com/algorand/py-algorand-sdk/blob/5faf79ddb56327a0e036ff4e21a39b52535751ae/x/testnet.py#L6) to this port number. (The port is set to `60000` by default because [SDK-testing](https://github.com/algorand/algorand-sdk-testing) bootstraps with this setting on Circle and also to avoid conflicting locally with the typical sandbox setup) + +### TEAL Program for Testing: Logic Sig v. App + +**STEP 3**. Next, you'll need to figure out if your TEAL program should be a Logic Signature or an Application. Each of these program _modes_ has its merits, but I won't get into the pros/cons here. From a Blackbox Test's perspective, the main difference is how each receives its arguments from the program executor. 
Logic sigs rely on the [arg opcode](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#arg-n) while apps rely on [txna ApplicationArgs i](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#txna-f-i). In our $`x^2`$ **logic sig** example, you can see on [line 2](https://github.com/algorand/py-algorand-sdk/blob/23c21170cfb19652d5da854e499dca47eabb20e8/x/blackbox/teal/lsig_square.teal#L2) that the `arg` opcode is used. Because each argument opcode (`arg` versus `ApplicationArgs`) is exclusive to one mode, any program that takes input will execute successfully in _one mode only_.
+
+**STEP 4**. Write the TEAL program that you want to test. You can inline the test as described here or follow the approach of `x/blackbox/blackbox_test.py` and save under `x/blackbox/teal`. So following the inline
+approach we begin our TEAL Blackbox script with an inline teal source variable:
+
+```python
+teal = """#pragma version 6
+arg 0
+btoi
+callsub square_0
+return
+
+// square
+square_0:
+store 0
+load 0
+pushint 2 // 2
+exp
+retsub"""
+```
+
+### The TEAL Blackbox Toolkit's Utility Classes
+
+The TEAL Blackbox Toolkit comes with the following utility classes:
+
+* `DryRunExecutor` - facility to execute dry runs on apps and logic sigs
+* `DryRunTransactionResult` - class encapsulating a single app or logic sig dry run transaction and for making assertions about the dry run
+* `SequenceAssertion` - class for asserting invariants about a _sequence_ of dry run executions in a declarative fashion
+
+### Basic Assertions
+
+When executing a dry run using `DryRunExecutor` you'll get back `DryRunTransactionResult` objects. Such objects have
+**assertable properties** which can be used to validate the dry run.
+
+**STEP 4**. Back to our $`x^2`$ example, and assuming the `teal` variable is defined [as above](#teal), you can run the following:
+
+```python
+algod = get_algod()
+x = 9
+args = (x,)
+dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args)
+assert dryrun_result.status() == "PASS"
+assert dryrun_result.stack_top() == x ** 2
+```
+
+Some available _assertable properties_ are:
+
+* `stack_top()`
+* `last_log()`
+* `cost()`
+* `status()`
+* `final_scratch()`
+* `error()`
+* `max_stack_height()`
+
+See the [DryRunTransactionResult class comment](https://github.com/algorand/py-algorand-sdk/blob/b2a3366b7bc976e0610429c186b7968a7f1bbc76/algosdk/testing/teal_blackbox.py#L371) for more assertable properties and details.
+
+### Printing out the TEAL Stack Trace for a Failing Assertion
+
+**STEP 5**. The `DryRunTransactionResult`'s `report()` method lets you print out
+a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails:
+
+```python
+algod = get_algod()
+x = 2
+args = (x,)
+dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args)
+
+# This one's ok
+expected, actual = "PASS", dryrun_result.status()
+assert expected == actual, dryrun_result.report(args, f"expected {expected} but got {actual}")
+
+# This one's absurd! x^3 != x^2
+expected, actual = x ** 3, dryrun_result.stack_top()
+assert expected == actual, dryrun_result.report(args, f"expected {expected} but got {actual}")
+```
+
+If we run the test we'll get the following printout (this is for pytest, but other testing frameworks should be similar):
+```sh
+E AssertionError: ===============
+E <<<<<<<<<<expected 8 but got 4>>>>>>>>>>>>
+E ===============
+E App Trace:
+E step | PC# | L# | Teal | Scratch | Stack
+E --------+-------+------+-------------------+-----------+----------------------
+E 1 | 1 | 1 | #pragma version 6 | | []
+E 2 | 2 | 2 | arg_0 | | [0x0000000000000002]
+E 3 | 3 | 3 | btoi | | [2]
+E 4 | 7 | 6 | label1: | | [2]
+E 5 | 9 | 7 | store 0 | 0->2 | []
+E 6 | 11 | 8 | load 0 | | [2]
+E 7 | 13 | 9 | pushint 2 | | [2, 2]
+E 8 | 14 | 10 | exp | | [4]
+E 9 | 6 | 4 | callsub label1 | | [4]
+E 10 | 15 | 11 | retsub | | [4]
+E ===============
+E MODE: Mode.Signature
+E TOTAL COST: None
+E ===============
+E FINAL MESSAGE: PASS
+E ===============
+E Messages: ['PASS']
+E Logs: []
+E ===============
+E -----BlackBoxResult(steps_executed=10)-----
+E TOTAL STEPS: 10
+E FINAL STACK: [4]
+E FINAL STACK TOP: 4
+E MAX STACK HEIGHT: 2
+E FINAL SCRATCH: {0: 2}
+E SLOTS USED: [0]
+E FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2}
+E ===============
+E Global Delta:
+E []
+E ===============
+E Local Delta:
+E []
+E ===============
+E TXN AS ROW: {' Run': 0, ' cost': None, ' final_log': None, ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2}
+E ===============
+E <<<<<<<<<<expected 8 but got 4>>>>>>>>>>>>
+E ===============
+E assert 8 == 4
+```
+
+In particular, we can:
+
+* Track the program execution by viewing its **App Trace**
+  * 2 was assigned to **scratch slot #0** at step 5
+  * the stack ended up with **4** on top
+  * the run **PASS**'ed
+* Read the message parameter that was provided and which explains in English what went wrong: `<<<<<<<<<<expected 8 but got 4>>>>>>>>>>>>`
+
+### EDRA: Exploratory Dry Run Analysis
+
+Let's expand our investigation from a single dry-run to multiple runs or a **run sequence**. In other words, given a sequence of inputs, observe _assertable properties_ for the corresponding
+executions, and conjecture some program invariants. To aid in the investigation we'll generate a report in CSV format (Comma Separated Values) where:
+
+* columns represent _assertable properties_ of dry-runs, and
+* rows represent dry-run executions for specific inputs
+
+**STEP 6**. Back to our $`x^2`$ example, here's how to generate a report with 1 row for each of the inputs `0, 1, ... , 15`:
+
+```python
+algod = get_algod()
+inputs = [(x,) for x in range(16)]
+dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs)
+csv = DryRunTransactionResult.csv_report(inputs, dryrun_results)
+print(csv)
+```
+
+Note that each element `(x,)` in the `inputs` array is itself a tuple, since the `args` given to a dry run execution need to be `Iterable` (remember that these will be passed to a TEAL program which may take one, several, or no inputs at all).
+At this point, you'll be able to look at your [dry run sequence results](https://github.com/algorand/py-algorand-sdk/blob/1bc7b8fcf21401608cece65507c36d1f6dbad531/algosdk/testing/teal_blackbox.py#L713) and conduct some analysis.
For the $`x^2`$ example if you load the CSV in Google sheets and reformat a bit it will look like:
+
+image
+
+Perusing the above, it looks right:
+
+* column `D` **Arg 00** has the input $`x`$ (it's the argument at index 0)
+* column `A` contains the **Run** number
+* column `E` **top of stack** does indeed store $`x^2`$ at the program's termination
+* column `B` **Status** of each run **PASS**es _except for **Run 1** with **Arg 00** = 0_. (The first run **REJECT**s because $`0^2 = 0`$ and TEAL programs reject when the top of the stack is 0)
+* column `G` shows scratch slot **s@000** which stores the value of $`x`$ (except for the case $`x = 0`$ in which it appears empty; in fact, slots always default to the zero value and an **artifact** of dry-runs is that they do not report when 0-values get stored into previously empty slots as no state change actually occurs)
+* column `F` **max stack height** is always 2. The final observation makes sense because there is no branching or looping in the program.
+
+**STEP 7**. We can re-cast these observed effects in `Columns E, B, G, F` as **program invariant conjectures** written in Python as follows:
+
+* `dryrun_result.stack_top() == x ** 2`
+* `dryrun_result.max_stack_height() == 2`
+* `dryrun_result.status() == ("REJECT" if x == 0 else "PASS")`
+* `dryrun_result.final_scratch() == ({} if x == 0 else {0: x})`
+
+### Advanced: Asserting Invariants on a Dry Run Sequence
+
+The final and most advanced topic of this Howto is to turn _program invariant conjectures_ into
+**sequence assertions**. That is, let's take the information we gleaned in our EDRA CSV report,
+and create an integration test out of it. There are two ways to achieve this goal:
+
+* Procedural sequence assertions
+* Declarative sequence assertions
+
+#### Procedural Blackbox Dry Run Sequence Assertions
+
+**STEP 8**. The procedural approach takes the _program invariant conjectures_ and simply asserts them
+inside of a for loop that iterates over the inputs and dry runs. One can call each dry run
+execution independently, or use `DryRunExecutor`'s convenience methods `dryrun_app_on_sequence()` and
+`dryrun_logicsig_on_sequence()`. For example, let's assert that the above invariants hold for all
+$`x \leq 100`$:
+
+```python
+algod = get_algod()
+inputs = [(x,) for x in range(101)]
+dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs)
+for i, dryrun_result in enumerate(dryrun_results):
+    args = inputs[i]
+    x = args[0]
+    assert dryrun_result.stack_top() == x ** 2
+    assert dryrun_result.max_stack_height() == 2
+    assert dryrun_result.status() == ("REJECT" if x == 0 else "PASS")
+    assert dryrun_result.final_scratch() == ({} if x == 0 else {0: x})
+```
+
+#### Declarative Blackbox Dry Run Sequence Assertions
+
+**STEP 9**. The TEAL Blackbox Toolkit also allows for declarative style test writing.
+Let's look at some sample assertions for our `lsig_square` TEAL program:
+
+```python
+    "lsig_square": {
+        "inputs": [(i,) for i in range(100)],
+        "assertions": {
+            DRProp.stackTop: lambda args: args[0] ** 2,
+            DRProp.maxStackHeight: 2,
+            DRProp.status: lambda args: "REJECT" if args[0] == 0 else "PASS",
+            DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}),
+        },
+    },
+```
+
+In the parlance of the TEAL Blackbox Toolkit, a set of such declarative assertions
+is called a **test scenario**.
Scenarios are dicts containing two keys `inputs` and `assertions` and follow [certain conventions](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L942). In particular:
+
+* **inputs** are lists of tuples, each tuple representing the `args` to be fed into a single dry run execution
+* **assertions** are dicts that map [DryRunProperty](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L20)'s to actual assertions
+* here is a [live example scenario](https://github.com/algorand/py-algorand-sdk/blob/c6e91b86acf545b66a94d27581d6cfa6318206fc/x/blackbox/blackbox_test.py#L442) for $`x^2`$
+
+In English, letting $`x`$ be the input variable for our square function, the above **test scenario**:
+
+* provides a list of 100 tuples of the form $`(x)`$ that will serve as args.
+  * IE: $`(0), (1), (2), ... , (99)`$
+* establishes 4 different _sequence assertions_ as follows:
+  * the **stack's top** will contain $`x^2`$
+  * the **max stack height** during execution is always 2
+  * the executions' **status** is **PASS** except for the case $`x=0`$
+  * the **final scratch** will have $`x`$ stored at slot `0` except for that strange $`x=0`$ case (recall the [0-val scratch slot artifact](#0val-artifact))
+
+Declarative sequence assertions make use of the following:
+
+* `DryRunProperty` (aka `DRProp`): an enum that acts as a key in a scenario's assertions dict
+* class `SequenceAssertion`
+  * its constructor takes in a predicate (there are [4 kinds of predicates](#predicate)) and returns a callable that is used for runtime assertions
+  * method `inputs_and_assertions()` validates a scenario and extracts out its assertions
+  * method `dryrun_assert()` evaluates the dry-run sequence using the constructed `SequenceAssertion`
+
+To employ the declarative test scenario above, write the following:
+
+```python
+algod = get_algod()
+
+scenario = {
+    "inputs": [(i,) for i in range(100)],
+    "assertions": {
+        DRProp.stackTop: lambda args: args[0] ** 2,
+        DRProp.maxStackHeight: 2,
+        DRProp.status: lambda args: "REJECT" if args[0] == 0 else "PASS",
+        DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}),
+    },
+}
+mode = ExecutionMode.Signature
+
+# Validate the scenario and dig out inputs/assertions:
+inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode)
+
+# Execute the dry runs and obtain sequence of DryRunTransactionResults:
+dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs)
+
+# Sequence assertions:
+for i, prop_n_predicate in enumerate(assertions.items()):
+    property, predicate = prop_n_predicate
+    assertion = SequenceAssertion(predicate)
+    assertion.dryrun_assert(inputs, dryrun_results, property)
+```
+
+**STEP 10**. _**Deep Dive into Sequence Assertion via Exercises**_
+
+There are 4 kinds of Sequence Assertions _aka_ predicates:
+
+1. _simple python types_ - these are useful in the case of _constant_ assertions. For example above, it was
+asserted that `maxStackHeight` was _**ALWAYS**_ 2 by just using `2` in the declaration `DRProp.maxStackHeight: 2,`
+2. _1-variable functions_ - these are useful when you have a python "simulator" for the assertable property. For example above it was asserted that `stackTop` was
+$`x^2`$ by using a lambda expression for $`x^2`$ in the declaration `DRProp.stackTop: lambda args: args[0] ** 2,`
+3. _dictionaries_ of type `Dict[Tuple, Any]` - these are useful when you want to assert a discrete set of input-output pairs. For example, if you have 4 inputs that you want to assert are being squared, you could use
+
+```python
+DRProp.stackTop: {
+    (2,): 4,
+    (7,): 49,
+    (13,): 169,
+    (11,): 121
+},
+```
+
+> Note that this case illustrates why `args` should be tuples instead of lists. In order to specify a map from args to expected, we need to make `args` a key
+> in a dictionary: Python dictionary keys must be hashable and lists are **not hashable** while tuples _are_ hashable.
+
+4. _2-variable functions_ - these are useful when your assertion is more subtle than out-and-out equality. For example, suppose you want to assert that the `cost` of each run is _between_ $`2n \pm 5`$ where $`n`$ is the first arg of the input. Then you could declare `DRProp.cost: lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5`
+
+#### **EXERCISE A**
+Convert each of the lambda expressions used above to dictionaries that assert the same thing.
+#### **EXERCISE B**
+Use 2-variable functions in order to _ignore_ the
+weird $`x=0`$ cases above.
+
+#### _PARTIAL SOLUTIONS to EXERCISES_
+
+**Exercise A Partial Solution**. For `DRProp.status`'s declaration you could define the `dict` using dictionary comprehension syntax as follows:
+
+```python
+DRProp.status: {(x,): "PASS" if x else "REJECT" for x in range(100)},
+```
+
+**Exercise B Partial Solution**. For `DRProp.status`'s declaration you could ignore the case $`x=0`$ as follows:
+
+```python
+DRProp.status: lambda args, actual: "PASS" == actual if args[0] else True,
+```
+
+## Slow and Bad Fibonacci - Another Example Report
+
+[This](https://docs.google.com/spreadsheets/d/1ax-jQdYCkKT61Z6SPeGm5BqAMybgkWJa-Dv0yVjgFSA/edit?usp=sharing) is an example of `app_slow_fibonacci.teal`'s Dryrun stats:
+image
+A few items to take note of:
+
+* $`n`$ is given by **Arg_00**
+* the app was **REJECT**ed for $`n = 0`$ because `fibonacci(0) == 0` is left at the top of the stack
+* the app was **REJECT**ed for $`n > 7`$ because of exceeding budget
+* the app **errored** only for $`n > 16`$ because of exceeding _dynamic_ budget
+* the **cost** is growing exponentially (poor algorithm design)
+* the **top of stack** contains `fibonacci(n)` except in the error case
+* the **final_log** contains `hex(fibonacci(n))` except in the error and reject cases
+* **max stack height** is $`2n`$ except for $`n=0`$ and the error case
+* you can see the final values of scratch slots **s@000** and **s@001** which are respectively $`n`$ and `fibonacci(n)`
+
+You can see how [sequence assertions can be made](https://github.com/algorand/py-algorand-sdk/blob/77addfc236e78e41e2fd761fd59b506d8d344346/x/blackbox/blackbox_test.py#L324) on this function.
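+
+For reference, here is a sketch of what such a declarative scenario might look like for `app_slow_fibonacci.teal`. It is only a sketch: it assumes the same `DRProp` enum and scenario conventions from STEP 9, uses plain Python `fib` and `fib_cost` helpers as reference implementations, and hard-codes the cutoffs observed in the report above (**REJECT** beyond $`n = 7`$, dynamic budget error beyond $`n = 16`$), so treat the exact numbers as illustrative:
+
+```python
+def fib(n):
+    # plain Python reference implementation of fibonacci(n)
+    a, b = 0, 1
+    for _ in range(n):
+        a, b = b, a + b
+    return a
+
+
+def fib_cost(args):
+    # rough cost model for the recursive TEAL implementation
+    cost = 17
+    for n in range(1, args[0] + 1):
+        cost += 31 * fib(n - 1)
+    return cost
+
+
+fibonacci_scenario = {
+    "inputs": [(n,) for n in range(18)],
+    "assertions": {
+        DRProp.cost: lambda args: fib_cost(args) if args[0] < 17 else 70_000,
+        DRProp.stackTop: lambda args, actual: actual == fib(args[0]) if args[0] < 17 else True,
+        DRProp.maxStackHeight: lambda args, actual: actual == max(2, 2 * args[0]) if args[0] < 17 else True,
+        DRProp.status: lambda args: "PASS" if 0 < args[0] < 8 else "REJECT",
+        DRProp.errorMessage: lambda args, actual: (
+            actual is None if args[0] < 17 else "dynamic cost budget exceeded" in actual
+        ),
+    },
+}
+```
+
+The 2-variable predicates declare "don't care" for the runs that blow the dynamic budget, in the spirit of EXERCISE B, and the scenario can then be validated with `SequenceAssertion.inputs_and_assertions()` and `dryrun_assert()` exactly as in STEP 9.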
-verify your TEAL program by experiment and observation diff --git a/blackbox/__init__.py b/blackbox/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/clients.py b/tests/clients.py new file mode 100644 index 00000000..ccbaee38 --- /dev/null +++ b/tests/clients.py @@ -0,0 +1,24 @@ +from algosdk.v2client.algod import AlgodClient +from algosdk.kmd import KMDClient +from algosdk.v2client.indexer import IndexerClient + +DEVNET_TOKEN = "a" * 64 +ALGOD_PORT = 60000 +KMD_PORT = 60001 +INDEXER_PORTS = range(59_996, 60_000) + + +def get_algod() -> AlgodClient: + return AlgodClient(DEVNET_TOKEN, f"http://localhost:{ALGOD_PORT}") + + +def get_kmd() -> KMDClient: + return KMDClient(DEVNET_TOKEN, f"http://localhost:{KMD_PORT}") + + +def get_indexer(port: int) -> IndexerClient: + assert ( + port in INDEXER_PORTS + ), f"port for available indexers must be in {INDEXER_PORTS} but was provided {port}" + + return IndexerClient(DEVNET_TOKEN, f"http://localhost:{port}") diff --git a/tests/integration_test.py b/tests/integration_test.py new file mode 100644 index 00000000..5a62d86a --- /dev/null +++ b/tests/integration_test.py @@ -0,0 +1,607 @@ +from pathlib import Path + +import pytest + +from algosdk.testing.dryrun import Helper as DryRunHelper + +# from algosdk.testing.teal_blackbox import ( +# DryRunEncoder as Encoder, +# DryRunExecutor as Executor, +# DryRunProperty as DRProp, +# DryRunTransactionResult as DRR, +# ExecutionMode, +# SequenceAssertion, +# ) + +from tests.clients import get_algod + + +def test_algod(): + assert get_algod().status(), "somehow got nothing out of Algod's status" + + +# def fac_with_overflow(n): +# if n < 2: +# return 1 +# if n > 20: +# return 2432902008176640000 +# return n * fac_with_overflow(n - 1) + + +# def fib(n): +# a, b = 0, 1 +# for _ in range(n): +# a, b = b, a + b +# return a + + +# def fib_cost(args): +# cost = 17 +# for n in range(1, args[0] + 1): +# cost += 31 * fib(n - 1) +# return cost + + +# def test_singleton_assertions(): +# algod = get_algod() +# algod_status = algod.status() +# assert algod_status + +# teal_fmt = """#pragma version 6 +# {} 0 +# btoi +# callsub square_0 +# {} +# return + +# // square +# square_0: +# store 0 +# load 0 +# pushint 2 // 2 +# exp +# retsub""" + +# teal_app, teal_lsig = list( +# map(lambda s: teal_fmt.format(s, ""), ["txna ApplicationArgs", "arg"]) +# ) + +# teal_app_log, bad_teal_lsig = list( +# map( +# lambda s: teal_fmt.format( +# s, +# """store 1 +# load 1 +# itob +# log +# load 1""", +# ), +# ["txna ApplicationArgs", "arg"], +# ) +# ) + +# x = 9 +# args = [x] + +# app_res, app_log_res = list( +# map( +# lambda teal: Executor.dryrun_app(algod, teal, args), +# [teal_app, teal_app_log], +# ) +# ) +# lsig_res, bad_lsig_res = list( +# map( +# lambda teal: Executor.dryrun_logicsig(algod, teal, args), +# [teal_lsig, bad_teal_lsig], +# ) +# ) + +# assert isinstance(app_res, DRR) +# assert isinstance(app_log_res, DRR) +# assert isinstance(lsig_res, DRR) +# assert isinstance(bad_lsig_res, DRR) + +# assert app_res.mode == ExecutionMode.Application +# assert app_log_res.mode == ExecutionMode.Application +# assert lsig_res.mode == ExecutionMode.Signature +# assert bad_lsig_res.mode == ExecutionMode.Signature + +# def prop_assert(dr_resp, actual, expected): +# assert expected == actual, dr_resp.report( +# args, f"expected {expected} but got {actual}" +# ) + +# prop_assert(app_res, app_res.cost(), 9) +# prop_assert(app_log_res, app_log_res.cost(), 14) +# prop_assert(lsig_res, lsig_res.cost(), None) + +# 
prop_assert(app_res, app_res.last_log(), None) +# prop_assert( +# app_log_res, app_log_res.last_log(), (x ** 2).to_bytes(8, "big").hex() +# ) +# prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x ** 2)) +# prop_assert(lsig_res, lsig_res.last_log(), None) + +# prop_assert(app_res, app_res.final_scratch(), {0: x}) +# prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x ** 2}) +# prop_assert(lsig_res, lsig_res.final_scratch(), {0: x}) +# prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x ** 2}) + +# prop_assert(app_res, app_res.stack_top(), x ** 2) +# prop_assert(app_log_res, app_log_res.stack_top(), x ** 2) +# prop_assert(lsig_res, lsig_res.stack_top(), x ** 2) +# prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x ** 2)) + +# prop_assert(app_res, app_res.max_stack_height(), 2) +# prop_assert(app_log_res, app_log_res.max_stack_height(), 2) +# prop_assert(lsig_res, lsig_res.max_stack_height(), 2) +# prop_assert(bad_lsig_res, bad_lsig_res.max_stack_height(), 2) + +# prop_assert(app_res, app_res.status(), "PASS") +# prop_assert(app_log_res, app_log_res.status(), "PASS") +# prop_assert(lsig_res, lsig_res.status(), "PASS") +# prop_assert(bad_lsig_res, bad_lsig_res.status(), "REJECT") + +# prop_assert(app_res, app_res.passed(), True) +# prop_assert(app_log_res, app_log_res.passed(), True) +# prop_assert(lsig_res, lsig_res.passed(), True) +# prop_assert(bad_lsig_res, bad_lsig_res.passed(), False) + +# prop_assert(app_res, app_res.rejected(), False) +# prop_assert(app_log_res, app_log_res.rejected(), False) +# prop_assert(lsig_res, lsig_res.rejected(), False) +# prop_assert(bad_lsig_res, bad_lsig_res.rejected(), True) + +# prop_assert(app_res, app_res.error(), False) +# prop_assert(app_log_res, app_log_res.error(), False) +# prop_assert(lsig_res, lsig_res.error(), False) +# prop_assert(bad_lsig_res, bad_lsig_res.error(), True) +# assert bad_lsig_res.error( +# contains="logic 0 failed at line 7: log not allowed in current mode" +# ) +# prop_assert( +# bad_lsig_res, bad_lsig_res.error(contains="log not allowed"), True +# ) +# prop_assert( +# bad_lsig_res, bad_lsig_res.error(contains="WRONG PATTERN"), False +# ) + +# prop_assert(app_res, app_res.error_message(), None) +# prop_assert(app_log_res, app_log_res.error_message(), None) +# prop_assert(lsig_res, lsig_res.error_message(), None) +# assert ( +# "logic 0 failed at line 7: log not allowed in current mode" +# in bad_lsig_res.error_message() +# ) + + +# APP_SCENARIOS = { +# "app_exp": { +# "inputs": [()], +# # since only a single input, just assert a constant in each case +# "assertions": { +# DRProp.cost: 11, +# DRProp.lastLog: Encoder.hex(2 ** 10), +# # dicts have a special meaning as assertions. 
So in the case of "finalScratch" +# # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around +# DRProp.finalScratch: lambda _: {0: 2 ** 10}, +# DRProp.stackTop: 2 ** 10, +# DRProp.maxStackHeight: 2, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "app_square_byref": { +# "inputs": [(i,) for i in range(100)], +# "assertions": { +# DRProp.cost: lambda _, actual: 20 < actual < 22, +# DRProp.lastLog: Encoder.hex(1337), +# # due to dry-run artifact of not reporting 0-valued scratchvars, +# # we have a special case for n=0: +# DRProp.finalScratch: lambda args, actual: ( +# {2, 1337, (args[0] ** 2 if args[0] else 2)} +# ).issubset(set(actual.values())), +# DRProp.stackTop: 1337, +# DRProp.maxStackHeight: 3, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "app_square": { +# "inputs": [(i,) for i in range(100)], +# "assertions": { +# DRProp.cost: 14, +# DRProp.lastLog: { +# # since execution REJECTS for 0, expect last log for this case to be None +# (i,): Encoder.hex(i * i) if i else None +# for i in range(100) +# }, +# DRProp.finalScratch: lambda args: ( +# {0: args[0], 1: args[0] ** 2} if args[0] else {} +# ), +# DRProp.stackTop: lambda args: args[0] ** 2, +# DRProp.maxStackHeight: 2, +# DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", +# DRProp.passed: lambda i: i[0] > 0, +# DRProp.rejected: lambda i: i[0] == 0, +# DRProp.errorMessage: None, +# }, +# }, +# "app_swap": { +# "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], +# "assertions": { +# DRProp.cost: 27, +# DRProp.lastLog: Encoder.hex(1337), +# DRProp.finalScratch: lambda args: { +# 0: 4, +# 1: 5, +# 2: Encoder.hex0x(args[0]), +# 3: 1337, +# 4: Encoder.hex0x(args[1]), +# 5: Encoder.hex0x(args[0]), +# }, +# DRProp.stackTop: 1337, +# DRProp.maxStackHeight: 2, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "app_string_mult": { +# "inputs": [("xyzw", i) for i in range(100)], +# "assertions": { +# DRProp.cost: lambda args: 30 + 15 * args[1], +# DRProp.lastLog: ( +# lambda args: Encoder.hex(args[0] * args[1]) +# if args[1] +# else None +# ), +# # due to dryrun 0-scratchvar artifact, special case for i == 0: +# DRProp.finalScratch: lambda args: ( +# { +# 0: 5, +# 1: args[1], +# 2: args[1] + 1, +# 3: Encoder.hex0x(args[0]), +# 4: Encoder.hex0x(args[0] * args[1]), +# 5: Encoder.hex0x(args[0] * args[1]), +# } +# if args[1] +# else { +# 0: 5, +# 2: args[1] + 1, +# 3: Encoder.hex0x(args[0]), +# } +# ), +# DRProp.stackTop: lambda args: len(args[0] * args[1]), +# DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, +# DRProp.status: lambda args: ( +# "PASS" if 0 < args[1] < 45 else "REJECT" +# ), +# DRProp.passed: lambda args: 0 < args[1] < 45, +# DRProp.rejected: lambda args: 0 >= args[1] or args[1] >= 45, +# DRProp.errorMessage: None, +# }, +# }, +# "app_oldfac": { +# "inputs": [(i,) for i in range(25)], +# "assertions": { +# DRProp.cost: lambda args, actual: ( +# actual - 40 <= 17 * args[0] <= actual + 40 +# ), +# DRProp.lastLog: lambda args: ( +# Encoder.hex(fac_with_overflow(args[0])) +# if args[0] < 21 +# else None +# ), +# DRProp.finalScratch: lambda args: ( +# {0: args[0], 1: fac_with_overflow(args[0])} +# if 0 < args[0] < 21 +# else ( +# {0: min(21, args[0])} +# if args[0] +# else {1: fac_with_overflow(args[0])} +# ) +# ), +# DRProp.stackTop: lambda args: 
fac_with_overflow(args[0]), +# DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), +# DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", +# DRProp.passed: lambda args: args[0] < 21, +# DRProp.rejected: lambda args: args[0] >= 21, +# DRProp.errorMessage: lambda args, actual: ( +# actual is None if args[0] < 21 else "overflowed" in actual +# ), +# }, +# }, +# "app_slow_fibonacci": { +# "inputs": [(i,) for i in range(18)], +# "assertions": { +# DRProp.cost: lambda args: ( +# fib_cost(args) if args[0] < 17 else 70_000 +# ), +# DRProp.lastLog: lambda args: ( +# Encoder.hex(fib(args[0])) if 0 < args[0] < 17 else None +# ), +# DRProp.finalScratch: lambda args, actual: ( +# actual == {0: args[0], 1: fib(args[0])} +# if 0 < args[0] < 17 +# else (True if args[0] >= 17 else actual == {}) +# ), +# # we declare to "not care" about the top of the stack for n >= 17 +# DRProp.stackTop: lambda args, actual: ( +# actual == fib(args[0]) if args[0] < 17 else True +# ), +# # similarly, we don't care about max stack height for n >= 17 +# DRProp.maxStackHeight: lambda args, actual: ( +# actual == max(2, 2 * args[0]) if args[0] < 17 else True +# ), +# DRProp.status: lambda args: "PASS" +# if 0 < args[0] < 8 +# else "REJECT", +# DRProp.passed: lambda args: 0 < args[0] < 8, +# DRProp.rejected: lambda args: 0 >= args[0] or args[0] >= 8, +# DRProp.errorMessage: lambda args, actual: ( +# actual is None +# if args[0] < 17 +# else "dynamic cost budget exceeded" in actual +# ), +# }, +# }, +# } + + +# @pytest.mark.parametrize("filebase", APP_SCENARIOS.keys()) +# def test_app_with_report(filebase: str): +# mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase] + +# # 0. Validate that the scenarios are well defined: +# inputs, assertions = SequenceAssertion.inputs_and_assertions( +# scenario, mode +# ) + +# algod = get_algod() + +# # 1. Read the TEAL from ./test/integration/teal/*.teal +# path = Path.cwd() / "test" / "integration" / "teal" +# case_name = filebase +# tealpath = path / f"{filebase}.teal" +# with open(tealpath, "r") as f: +# teal = f.read() + +# print( +# f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: +# ------- +# {teal} +# -------""" +# ) + +# # 2. Run the requests to obtain sequence of Dryrun responses: +# dryrun_results = Executor.dryrun_app_on_sequence(algod, teal, inputs) + +# # 3. Generate statistical report of all the runs: +# csvpath = path / f"{filebase}.csv" +# with open(csvpath, "w") as f: +# f.write(DRR.csv_report(inputs, dryrun_results)) + +# print(f"Saved Dry Run CSV report to {csvpath}") + +# # 4. Sequential assertions (if provided any) +# for i, type_n_assertion in enumerate(assertions.items()): +# assert_type, assertion = type_n_assertion + +# assert SequenceAssertion.mode_has_assertion( +# mode, assert_type +# ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE or MODIFY" + +# assertion = SequenceAssertion( +# assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" +# ) +# print( +# f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" +# ) +# assertion.dryrun_assert(inputs, dryrun_results, assert_type) + + +# # NOTE: logic sig dry runs are missing some information when compared with app dry runs. 
+# # Therefore, certain assertions don't make sense for logic sigs explaining why some of the below are commented out: +# LOGICSIG_SCENARIOS = { +# "lsig_exp": { +# "inputs": [()], +# "assertions": { +# # DRA.cost: 11, +# # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), +# DRProp.finalScratch: lambda _: {}, +# DRProp.stackTop: 2 ** 10, +# DRProp.maxStackHeight: 2, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "lsig_square_byref": { +# "inputs": [(i,) for i in range(100)], +# "assertions": { +# # DRA.cost: lambda _, actual: 20 < actual < 22, +# # DRA.lastLog: lightly_encode_output(1337, logs=True), +# # due to dry-run artifact of not reporting 0-valued scratchvars, +# # we have a special case for n=0: +# DRProp.finalScratch: lambda args: ( +# {0: args[0] ** 2} if args[0] else {} +# ), +# DRProp.stackTop: 1337, +# DRProp.maxStackHeight: 3, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "lsig_square": { +# "inputs": [(i,) for i in range(100)], +# "assertions": { +# # DRA.cost: 14, +# # DRA.lastLog: {(i,): lightly_encode_output(i * i, logs=True) if i else None for i in range(100)}, +# DRProp.finalScratch: lambda args: ( +# {0: args[0]} if args[0] else {} +# ), +# DRProp.stackTop: lambda args: args[0] ** 2, +# DRProp.maxStackHeight: 2, +# DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", +# DRProp.passed: lambda i: i[0] > 0, +# DRProp.rejected: lambda i: i[0] == 0, +# DRProp.errorMessage: None, +# }, +# }, +# "lsig_swap": { +# "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], +# "assertions": { +# # DRA.cost: 27, +# # DRA.lastLog: lightly_encode_output(1337, logs=True), +# DRProp.finalScratch: lambda args: { +# 0: Encoder.hex0x(args[1]), +# 1: Encoder.hex0x(args[0]), +# 3: 1, +# 4: Encoder.hex0x(args[0]), +# }, +# DRProp.stackTop: 1337, +# DRProp.maxStackHeight: 2, +# DRProp.status: "PASS", +# DRProp.passed: True, +# DRProp.rejected: False, +# DRProp.errorMessage: None, +# }, +# }, +# "lsig_string_mult": { +# "inputs": [("xyzw", i) for i in range(100)], +# "assertions": { +# # DRA.cost: lambda args: 30 + 15 * args[1], +# # DRA.lastLog: lambda args: lightly_encode_output(args[0] * args[1]) if args[1] else None, +# DRProp.finalScratch: lambda args: ( +# { +# 0: Encoder.hex0x(args[0] * args[1]), +# 2: args[1], +# 3: args[1] + 1, +# 4: Encoder.hex0x(args[0]), +# } +# if args[1] +# else { +# 3: args[1] + 1, +# 4: Encoder.hex0x(args[0]), +# } +# ), +# DRProp.stackTop: lambda args: len(args[0] * args[1]), +# DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, +# DRProp.status: lambda args: "PASS" if args[1] else "REJECT", +# DRProp.passed: lambda args: bool(args[1]), +# DRProp.rejected: lambda args: not bool(args[1]), +# DRProp.errorMessage: None, +# }, +# }, +# "lsig_oldfac": { +# "inputs": [(i,) for i in range(25)], +# "assertions": { +# # DRA.cost: lambda args, actual: actual - 40 <= 17 * args[0] <= actual + 40, +# # DRA.lastLog: lambda args, actual: (actual is None) or (int(actual, base=16) == fac_with_overflow(args[0])), +# DRProp.finalScratch: lambda args: ( +# {0: min(args[0], 21)} if args[0] else {} +# ), +# DRProp.stackTop: lambda args: fac_with_overflow(args[0]), +# DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), +# DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", +# DRProp.passed: lambda args: args[0] < 21, +# DRProp.rejected: lambda args: args[0] >= 21, +# DRProp.errorMessage: lambda 
args, actual: ( +# actual is None +# if args[0] < 21 +# else "logic 0 failed at line 21: * overflowed" in actual +# ), +# }, +# }, +# "lsig_slow_fibonacci": { +# "inputs": [(i,) for i in range(18)], +# "assertions": { +# # DRA.cost: fib_cost, +# # DRA.lastLog: fib_last_log, +# # by returning True for n >= 15, we're declaring that we don't care about the scratchvar's for such cases: +# DRProp.finalScratch: lambda args, actual: ( +# actual == {0: args[0]} +# if 0 < args[0] < 15 +# else (True if args[0] else actual == {}) +# ), +# DRProp.stackTop: lambda args, actual: ( +# actual == fib(args[0]) if args[0] < 15 else True +# ), +# DRProp.maxStackHeight: lambda args, actual: ( +# actual == max(2, 2 * args[0]) if args[0] < 15 else True +# ), +# DRProp.status: lambda args: "PASS" +# if 0 < args[0] < 15 +# else "REJECT", +# DRProp.passed: lambda args: 0 < args[0] < 15, +# DRProp.rejected: lambda args: not (0 < args[0] < 15), +# DRProp.errorMessage: lambda args, actual: ( +# actual is None +# if args[0] < 15 +# else "dynamic cost budget exceeded" in actual +# ), +# }, +# }, +# } + + +# @pytest.mark.parametrize("filebase", LOGICSIG_SCENARIOS.keys()) +# def test_logicsig_with_report(filebase: str): +# mode, scenario = ExecutionMode.Signature, LOGICSIG_SCENARIOS[filebase] + +# # 0. Validate that the scenarios are well defined: +# inputs, assertions = SequenceAssertion.inputs_and_assertions( +# scenario, mode +# ) + +# algod = get_algod() + +# # 1. Read the TEAL from ./test/integration/teal/*.teal +# path = Path.cwd() / "test" / "integration" / "teal" +# case_name = filebase +# tealpath = path / f"{filebase}.teal" +# with open(tealpath, "r") as f: +# teal = f.read() + +# print( +# f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: +# ------- +# {teal} +# -------""" +# ) + +# # 2. Run the requests to obtain sequence of Dryrun resonses: +# dryrun_results = Executor.dryrun_logicsig_on_sequence(algod, teal, inputs) + +# # 3. Generate statistical report of all the runs: +# csvpath = path / f"{filebase}.csv" +# with open(csvpath, "w") as f: +# f.write(DRR.csv_report(inputs, dryrun_results)) + +# print(f"Saved Dry Run CSV report to {csvpath}") + +# # 4. Sequential assertions (if provided any) +# for i, type_n_assertion in enumerate(assertions.items()): +# assert_type, assertion = type_n_assertion + +# assert SequenceAssertion.mode_has_assertion( +# mode, assert_type +# ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE of MODIFY" + +# assertion = SequenceAssertion( +# assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" +# ) +# print( +# f"{i+1}. 
Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" +# ) +# assertion.dryrun_assert(inputs, dryrun_results, assert_type) diff --git a/tests/blackbox_test.py b/tests/unit_test.py similarity index 100% rename from tests/blackbox_test.py rename to tests/unit_test.py From 6d214b7685af6a8815d934121061442b943fdf7e Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:13:26 -0500 Subject: [PATCH 06/85] compromise on py 3.8 --- setup.py | 2 +- tests/clients.py | 23 ++++++++++++----------- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/setup.py b/setup.py index e4c636bf..fbd03bce 100644 --- a/setup.py +++ b/setup.py @@ -21,5 +21,5 @@ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], - python_requires=">=3.10", + python_requires=">=3.8", ) diff --git a/tests/clients.py b/tests/clients.py index ccbaee38..f27e1805 100644 --- a/tests/clients.py +++ b/tests/clients.py @@ -1,24 +1,25 @@ from algosdk.v2client.algod import AlgodClient -from algosdk.kmd import KMDClient -from algosdk.v2client.indexer import IndexerClient + +# from algosdk.kmd import KMDClient +# from algosdk.v2client.indexer import IndexerClient DEVNET_TOKEN = "a" * 64 ALGOD_PORT = 60000 -KMD_PORT = 60001 -INDEXER_PORTS = range(59_996, 60_000) +# KMD_PORT = 60001 +# INDEXER_PORTS = range(59_996, 60_000) def get_algod() -> AlgodClient: return AlgodClient(DEVNET_TOKEN, f"http://localhost:{ALGOD_PORT}") -def get_kmd() -> KMDClient: - return KMDClient(DEVNET_TOKEN, f"http://localhost:{KMD_PORT}") +# def get_kmd() -> KMDClient: +# return KMDClient(DEVNET_TOKEN, f"http://localhost:{KMD_PORT}") -def get_indexer(port: int) -> IndexerClient: - assert ( - port in INDEXER_PORTS - ), f"port for available indexers must be in {INDEXER_PORTS} but was provided {port}" +# def get_indexer(port: int) -> IndexerClient: +# assert ( +# port in INDEXER_PORTS +# ), f"port for available indexers must be in {INDEXER_PORTS} but was provided {port}" - return IndexerClient(DEVNET_TOKEN, f"http://localhost:{port}") +# return IndexerClient(DEVNET_TOKEN, f"http://localhost:{port}") From c36b8dd30de12b3edebc1cffeb1275213272591c Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:13:37 -0500 Subject: [PATCH 07/85] pwd too --- Makefile | 1 + 1 file changed, 1 insertion(+) diff --git a/Makefile b/Makefile index 20b6e2f0..cfb5356c 100644 --- a/Makefile +++ b/Makefile @@ -18,6 +18,7 @@ build-and-test: blackbox: echo "hello blackbox!" 
+ pwd ls ./sandbox test pytest tests/integration_test.py From fc5a59c1208583cabc4f59259125a12dc3815c0a Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:17:16 -0500 Subject: [PATCH 08/85] teals moved to tests/ directory --- {blackbox => tests}/teal/app_exp.teal | 0 {blackbox => tests}/teal/app_oldfac.teal | 0 {blackbox => tests}/teal/app_slow_fibonacci.teal | 0 {blackbox => tests}/teal/app_square.teal | 0 {blackbox => tests}/teal/app_square_byref.teal | 0 {blackbox => tests}/teal/app_string_mult.teal | 0 {blackbox => tests}/teal/app_swap.teal | 0 {blackbox => tests}/teal/lsig_exp.teal | 0 {blackbox => tests}/teal/lsig_oldfac.teal | 0 {blackbox => tests}/teal/lsig_slow_fibonacci.teal | 0 {blackbox => tests}/teal/lsig_square.teal | 0 {blackbox => tests}/teal/lsig_square_byref.teal | 0 {blackbox => tests}/teal/lsig_string_mult.teal | 0 {blackbox => tests}/teal/lsig_swap.teal | 0 14 files changed, 0 insertions(+), 0 deletions(-) rename {blackbox => tests}/teal/app_exp.teal (100%) rename {blackbox => tests}/teal/app_oldfac.teal (100%) rename {blackbox => tests}/teal/app_slow_fibonacci.teal (100%) rename {blackbox => tests}/teal/app_square.teal (100%) rename {blackbox => tests}/teal/app_square_byref.teal (100%) rename {blackbox => tests}/teal/app_string_mult.teal (100%) rename {blackbox => tests}/teal/app_swap.teal (100%) rename {blackbox => tests}/teal/lsig_exp.teal (100%) rename {blackbox => tests}/teal/lsig_oldfac.teal (100%) rename {blackbox => tests}/teal/lsig_slow_fibonacci.teal (100%) rename {blackbox => tests}/teal/lsig_square.teal (100%) rename {blackbox => tests}/teal/lsig_square_byref.teal (100%) rename {blackbox => tests}/teal/lsig_string_mult.teal (100%) rename {blackbox => tests}/teal/lsig_swap.teal (100%) diff --git a/blackbox/teal/app_exp.teal b/tests/teal/app_exp.teal similarity index 100% rename from blackbox/teal/app_exp.teal rename to tests/teal/app_exp.teal diff --git a/blackbox/teal/app_oldfac.teal b/tests/teal/app_oldfac.teal similarity index 100% rename from blackbox/teal/app_oldfac.teal rename to tests/teal/app_oldfac.teal diff --git a/blackbox/teal/app_slow_fibonacci.teal b/tests/teal/app_slow_fibonacci.teal similarity index 100% rename from blackbox/teal/app_slow_fibonacci.teal rename to tests/teal/app_slow_fibonacci.teal diff --git a/blackbox/teal/app_square.teal b/tests/teal/app_square.teal similarity index 100% rename from blackbox/teal/app_square.teal rename to tests/teal/app_square.teal diff --git a/blackbox/teal/app_square_byref.teal b/tests/teal/app_square_byref.teal similarity index 100% rename from blackbox/teal/app_square_byref.teal rename to tests/teal/app_square_byref.teal diff --git a/blackbox/teal/app_string_mult.teal b/tests/teal/app_string_mult.teal similarity index 100% rename from blackbox/teal/app_string_mult.teal rename to tests/teal/app_string_mult.teal diff --git a/blackbox/teal/app_swap.teal b/tests/teal/app_swap.teal similarity index 100% rename from blackbox/teal/app_swap.teal rename to tests/teal/app_swap.teal diff --git a/blackbox/teal/lsig_exp.teal b/tests/teal/lsig_exp.teal similarity index 100% rename from blackbox/teal/lsig_exp.teal rename to tests/teal/lsig_exp.teal diff --git a/blackbox/teal/lsig_oldfac.teal b/tests/teal/lsig_oldfac.teal similarity index 100% rename from blackbox/teal/lsig_oldfac.teal rename to tests/teal/lsig_oldfac.teal diff --git a/blackbox/teal/lsig_slow_fibonacci.teal b/tests/teal/lsig_slow_fibonacci.teal similarity index 100% rename from blackbox/teal/lsig_slow_fibonacci.teal rename to 
tests/teal/lsig_slow_fibonacci.teal diff --git a/blackbox/teal/lsig_square.teal b/tests/teal/lsig_square.teal similarity index 100% rename from blackbox/teal/lsig_square.teal rename to tests/teal/lsig_square.teal diff --git a/blackbox/teal/lsig_square_byref.teal b/tests/teal/lsig_square_byref.teal similarity index 100% rename from blackbox/teal/lsig_square_byref.teal rename to tests/teal/lsig_square_byref.teal diff --git a/blackbox/teal/lsig_string_mult.teal b/tests/teal/lsig_string_mult.teal similarity index 100% rename from blackbox/teal/lsig_string_mult.teal rename to tests/teal/lsig_string_mult.teal diff --git a/blackbox/teal/lsig_swap.teal b/tests/teal/lsig_swap.teal similarity index 100% rename from blackbox/teal/lsig_swap.teal rename to tests/teal/lsig_swap.teal From cfe8d089f8d7e525d79ecd212f20f79707dcde60 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:29:08 -0500 Subject: [PATCH 09/85] markdownlint --- README.md | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 9d9a779a..ce65696b 100644 --- a/README.md +++ b/README.md @@ -1,3 +1,5 @@ + + # TEAL Blackbox Toolkit: Program Reporting and Testing via Dry Runs **NOTE: to get math formulas to render here using Chrome, add the [xhub extension](https://chrome.google.com/webstore/detail/xhub/anidddebgkllnnnnjfkmjcaallemhjee/related) and reload** @@ -53,6 +55,7 @@ Even better, before making fine-grained assertions you'd like to get a sense of * create invariants for the entire run-sequence and assert that the invariants hold (see "**Advanced: Asserting Invariants on a Dry Run Sequence**" section below) > Becoming a TEAL Blackbox Toolkit Ninja involves 10 steps as described below + ### Dry Run Environment Setup **STEP 1**. Start with a running local node and make note of Algod's port number (for our [standard sandbox](https://github.com/algorand/sandbox) this is `4001`) @@ -139,6 +142,7 @@ assert expected == actual, dryrun_result.report(args, f"expected {expected} but ``` If we run the test we'll get the following printout (this is for pytest, but other testing frameworks should be similar): + ```sh E AssertionError: =============== E <<<<<<<<<<>>>>>>>>>>>> @@ -203,7 +207,7 @@ executions, and conjecture some program invariants. To aid in the investigation * columns represent _assertable properties_ of dry-runs, and * rows represents dry-run executions for specific inputs -**STEP 6**. Back to our $`x^2`$ example, here's how to generate a report with 1 row for each of the inputs `0, 1, ... , 15`: +**STEP 6**. Back to our $`x^2`$ example, here's how to generate a report with 1 row for each of the inputs `0, 1, ... , 15`: ```python algod = get_algod() @@ -218,7 +222,7 @@ At this point, you'll be able to look at your [dry run sequence results](https:/ image -Perusing the above, it looks right: +Perusing the above, it looks right: * column `D` **Arg 00** has the input $`x`$ (it's the argument at index 0) * column `A` contains the **Run** number @@ -237,7 +241,7 @@ Perusing the above, it looks right: ### Advanced: Asserting Invariants on a Dry Run Sequence The final and most advanced topic of this Howto is to turn _program invariant conjectures_ into -**sequence assertions**. That is, let's take the information we gleaned in our EDRA CSV report, +**sequence assertions**. That is, let's take the information we gleaned in our EDRA CSV report, and create an integration test out of it. 
There are two ways to achieve this goal: * Procedural sequence assertions @@ -245,7 +249,7 @@ and create an integration test out of it. There are two ways to achieve this goa #### Procedural Blackbox Dry Run Sequence Assertions -**STEP 8**. The procedural approach takes the _program invariant conjectures_ and simply asserts them +**STEP 8**. The procedural approach takes the _program invariant conjectures_ and simply asserts them inside of a for loop that iterates over the inputs and dry runs. One can call each dry run execution independently, or use `DryRunExecutor`'s convenience methods `dryrun_app_on_sequence()` and `dryrun_logicsig_on_sequence()`. For example, let's assert that the above invariants hold for all @@ -266,7 +270,7 @@ for i, dryrun_result in enumerate(dryrun_results): #### Declarative Blackbox Dry Run Sequence Assertions -**STEP 9**. The TEAL Blackbox Toolkit also allows for declarative style test writing. +**STEP 9**. The TEAL Blackbox Toolkit also allows for declarative style test writing. Let's look at some sample assertions for our `lsig_square` TEAL program: ```python @@ -357,11 +361,16 @@ DRProp.stackTop: { >Note that this case illustrates why `args` should be tuples intead of lists. In order to specify a map from args to expected, we need to make `args` a key >in a dictionary: Python dictionary keys must be hashable and lists are **not hashable** while tuples _are_ hashable. + + 4. _2-variable functions_ -these are useful when your assertion is more subtle than out-and-out equality. For example, suppose you want to assert that the `cost` of each run is _between_ $`2n \pm 5`$ where $`n`$ is the first arg of the input. Then you could declare `DRProp.cost: lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` #### **EXERCISE A** + Convert each of the lambda expressions used above to dictionaries that assert the same thing. + #### **EXERCISE B** + Use 2-variable functions in order to _ignore_ the weird $`x=0`$ cases above. @@ -396,4 +405,3 @@ A few items to take note of: * you can see the final values of scratch slots **s@000** and **s@001** which are respectively $`n`$ and `fibonacci(n)` You can see how [sequence assertions can be made](https://github.com/algorand/py-algorand-sdk/blob/77addfc236e78e41e2fd761fd59b506d8d344346/x/blackbox/blackbox_test.py#L324) on this function. 
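
As a parting example, here is one possible sketch of **EXERCISE B** above: using 2-variable functions so that the `lsig_square` invariants simply _ignore_ the weird $`x=0`$ case. The import paths (`blackbox.blackbox`, `tests.clients`) and the TEAL file location are assumptions about this repo's current layout rather than a fixed API, so adjust them to wherever `DryRunExecutor`, `DryRunProperty` and `SequenceAssertion` actually live in your checkout:

```python
from blackbox.blackbox import (  # assumed import path
    DryRunExecutor,
    DryRunProperty as DRProp,
    SequenceAssertion,
)
from tests.clients import get_algod  # assumed import path

algod = get_algod()
inputs = [(x,) for x in range(16)]

# assumed TEAL file location (post-move to tests/teal/)
with open("tests/teal/lsig_square.teal") as f:
    teal = f.read()

dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs)

# 2-variable predicates receive (args, actual) and return a bool directly,
# so they can just return True for the x == 0 edge case:
invariants = {
    DRProp.stackTop: lambda args, actual: args[0] == 0 or actual == args[0] ** 2,
    DRProp.status: lambda args, actual: args[0] == 0 or actual == "PASS",
}

for dr_property, predicate in invariants.items():
    assertion = SequenceAssertion(predicate, name=f"lsig_square@{dr_property}")
    assertion.dryrun_assert(inputs, dryrun_results, dr_property)
```

Because each predicate receives both `args` and `actual`, `SequenceAssertion` uses its boolean result directly instead of comparing against a computed expected value, which is exactly what makes the $`x=0`$ carve-out possible.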
- From b036daae02a40f829bb8e946765a84785d0a72e0 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:29:23 -0500 Subject: [PATCH 10/85] tabulate --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index fbd03bce..b47860e3 100644 --- a/setup.py +++ b/setup.py @@ -15,7 +15,7 @@ long_description_content_type="text/markdown", url="https://github.com/algorand/graviton", packages=setuptools.find_packages(), - install_requires=["py-algorand-sdk"], + install_requires=["py-algorand-sdk", "tabulate==0.8.9"], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", From 5a4dfcb33faccd75afc99db63aed3843ccd220ba Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:38:33 -0500 Subject: [PATCH 11/85] don't try to publish inside the docker --- .github/workflows/build.yml | 8 +- Makefile | 7 +- blackbox/blackbox.py | 973 ++++++++++++++++++++++++++++++++++++ blackbox/dryrun.py | 947 +++++++++++++++++++++++++++++++++++ 4 files changed, 1929 insertions(+), 6 deletions(-) create mode 100644 blackbox/blackbox.py create mode 100644 blackbox/dryrun.py diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 54b3e85b..4ae90105 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,9 +25,9 @@ jobs: with: fetch-depth: 0 - name: Install pip dependencies - run: make pip - - name: Build and Test - run: make build-and-test + run: make pip-publish + - name: build-and-test + run: make unit-test run-integration-tests: runs-on: ubuntu-20.04 steps: @@ -63,6 +63,6 @@ jobs: with: config: ${{ env.CONFIG }} - name: Setup integration test environment - run: make pip build-and-test + run: make pip-test build-and-test - name: Run integration tests run: make blackbox \ No newline at end of file diff --git a/Makefile b/Makefile index cfb5356c..1b8f4888 100644 --- a/Makefile +++ b/Makefile @@ -8,12 +8,15 @@ env-down: # Build -pip: +pip-publish: pip install -r requirements.txt pip install -e . +pip-test: + pip install -r requirements.txt + pip install . 
-build-and-test: +unit-test: pytest tests/unit_test.py blackbox: diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py new file mode 100644 index 00000000..0c29f61a --- /dev/null +++ b/blackbox/blackbox.py @@ -0,0 +1,973 @@ +from base64 import b64decode +import csv +from dataclasses import dataclass +from enum import Enum, auto +import io +from inspect import signature +from tabulate import tabulate +from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union + +from algosdk.v2client.algod import AlgodClient +from algosdk.testing.dryrun import ( + ZERO_ADDRESS, + assert_error, + assert_no_error, + Helper as DryRunHelper, +) + + +class ExecutionMode(Enum): + Signature = auto() + Application = auto() + + +class DryRunProperty(Enum): + cost = auto() + lastLog = auto() + finalScratch = auto() + stackTop = auto() + maxStackHeight = auto() + status = auto() + rejected = auto() + passed = auto() + error = auto() + errorMessage = auto() + globalStateHas = auto() + localStateHas = auto() + + +DRProp = DryRunProperty + + +@dataclass +class TealVal: + i: int = 0 + b: str = "" + is_b: bool = None + hide_empty: bool = True + + @classmethod + def from_stack(cls, d: dict) -> "TealVal": + return TealVal(d["uint"], d["bytes"], d["type"] == 1, hide_empty=False) + + @classmethod + def from_scratch(cls, d: dict) -> "TealVal": + return TealVal(d["uint"], d["bytes"], len(d["bytes"]) > 0) + + def is_empty(self) -> bool: + return not (self.i or self.b) + + def __str__(self) -> str: + if self.hide_empty and self.is_empty(): + return "" + + assert self.is_b is not None, f"can't handle StackVariable with empty type" + return f"0x{b64decode(self.b).hex()}" if self.is_b else str(self.i) + + def as_python_type(self) -> Union[int, str, None]: + if self.is_b is None: + return None + return str(self) if self.is_b else self.i + + +@dataclass +class BlackboxResults: + steps_executed: int + program_counters: List[int] + teal_line_numbers: List[int] + teal_source_lines: List[str] + stack_evolution: List[list] + scratch_evolution: List[dict] + final_scratch_state: Dict[int, TealVal] + slots_used: List[int] + raw_stacks: List[list] + + @classmethod + def scrape( + cls, + trace, + lines, + scratch_colon: str = "->", + scratch_verbose: bool = False, + ) -> "BlackboxResults": + pcs = [t["pc"] for t in trace] + line_nums = [t["line"] for t in trace] + + def line_or_err(i, ln): + line = lines[ln - 1] + err = trace[i].get("error") + return err if err else line + + tls = [line_or_err(i, ln) for i, ln in enumerate(line_nums)] + N = len(pcs) + assert N == len(tls), f"mismatch of lengths in pcs v. tls ({N} v. {len(tls)})" + + # process stack var's + raw_stacks = [ + [TealVal.from_stack(s) for s in x] for x in [t["stack"] for t in trace] + ] + stacks = [f"[{', '.join(map(str,stack))}]" for stack in raw_stacks] + assert N == len( + stacks + ), f"mismatch of lengths in tls v. stacks ({N} v. 
{len(stacks)})" + + # process scratch var's + scratches = [ + [TealVal.from_scratch(s) for s in x] + for x in [t.get("scratch", []) for t in trace] + ] + scratches = [ + {i: s for i, s in enumerate(scratch) if not s.is_empty()} + for scratch in scratches + ] + slots_used = sorted(set().union(*(s.keys() for s in scratches))) + final_scratch_state = scratches[-1] + if not scratch_verbose: + + def compute_delta(prev, curr): + pks, cks = set(prev.keys()), set(curr.keys()) + new_keys = cks - pks + if new_keys: + return {k: curr[k] for k in new_keys} + return {k: v for k, v in curr.items() if prev[k] != v} + + scratch_deltas = [scratches[0]] + for i in range(1, len(scratches)): + scratch_deltas.append(compute_delta(scratches[i - 1], scratches[i])) + + scratches = [ + [f"{i}{scratch_colon}{v}" for i, v in scratch.items()] + for scratch in scratch_deltas + ] + else: + scratches = [ + [ + f"{i}{scratch_colon}{scratch[i]}" if i in scratch else "" + for i in slots_used + ] + for scratch in scratches + ] + + assert N == len( + scratches + ), f"mismatch of lengths in tls v. scratches ({N} v. {len(scratches)})" + + bbr = cls( + N, + pcs, + line_nums, + tls, + stacks, + scratches, + final_scratch_state, + slots_used, + raw_stacks, + ) + bbr.assert_well_defined() + return bbr + + def assert_well_defined(self): + assert all( + self.steps_executed == len(x) + for x in ( + self.program_counters, + self.teal_source_lines, + self.stack_evolution, + self.scratch_evolution, + ) + ), f"some mismatch in trace sizes: all expected to be {self.steps_executed}" + + def __str__(self) -> str: + return f"BlackBoxResult(steps_executed={self.steps_executed})" + + def steps(self) -> int: + return self.steps_executed + + def final_stack(self) -> str: + return self.stack_evolution[-1] + + def final_stack_top(self) -> Union[int, str, None]: + final_stack = self.raw_stacks[-1] + if not final_stack: + return None + top = final_stack[-1] + return str(top) if top.is_b else top.i + + def max_stack_height(self) -> int: + return max(len(s) for s in self.raw_stacks) + + def final_scratch( + self, with_formatting: bool = False + ) -> Dict[Union[int, str], Union[int, str]]: + unformatted = { + i: str(s) if s.is_b else s.i for i, s in self.final_scratch_state.items() + } + if not with_formatting: + return unformatted + return {f"s@{i:03}": s for i, s in unformatted.items()} + + def slots(self) -> List[int]: + return self.slots_used + + def final_as_row(self) -> Dict[str, Union[str, int]]: + return { + "steps": self.steps(), + " top_of_stack": self.final_stack_top(), + "max_stack_height": self.max_stack_height(), + **self.final_scratch(with_formatting=True), + } + + +class DryRunEncoder: + """Encoding utilities for dry run executions and results""" + + @classmethod + def encode_args(cls, args: Iterable[Union[str, int]]) -> List[str]: + """ + Encoding convention for Black Box Testing. + + * Assumes int's are uint64 and encodes them as such + * Leaves str's alone + """ + return [cls._encode_arg(a, i) for i, a in enumerate(args)] + + @classmethod + def hex0x(cls, x) -> str: + return f"0x{cls.hex(x)}" + + @classmethod + def hex(cls, out: Union[int, str]) -> str: + """ + Encoding convention for Black Box Testing. 
+ + * Assumes int's are uint64 + * Assumes everything else is a str + * Encodes them into hex str's + """ + cls._assert_encodable(out) + return cls._to_bytes(out).hex() + + @classmethod + def _to_bytes(cls, x, only_ints=False): + is_int = isinstance(x, int) + if only_ints and not is_int: + return x + return x.to_bytes(8, "big") if is_int else bytes(x, "utf-8") + + @classmethod + def _encode_arg(cls, arg, idx): + cls._assert_encodable(arg, f"problem encoding arg ({arg}) at index ({idx})") + return cls._to_bytes(arg, only_ints=True) + + @classmethod + def _assert_encodable(cls, arg: Any, msg: str = "") -> None: + assert isinstance( + arg, (int, str) + ), f"{msg +': ' if msg else ''}can't handle arg [{arg}] of type {type(arg)}" + if isinstance(arg, int): + assert arg >= 0, f"can't handle negative arguments but was given {arg}" + + +class DryRunExecutor: + """Methods to package up and kick off dry run executions""" + + @classmethod + def dryrun_app( + cls, + algod: AlgodClient, + teal: str, + args: Iterable[Union[str, int]], + sender: str = ZERO_ADDRESS, + ) -> "DryRunTransactionResult": + return cls.execute_one_dryrun( + algod, teal, args, ExecutionMode.Application, sender=sender + ) + + @classmethod + def dryrun_logicsig( + cls, + algod: AlgodClient, + teal: str, + args: Iterable[Union[str, int]], + sender: str = ZERO_ADDRESS, + ) -> "DryRunTransactionResult": + return cls.execute_one_dryrun( + algod, teal, args, ExecutionMode.Signature, sender + ) + + @classmethod + def dryrun_app_on_sequence( + cls, + algod: AlgodClient, + teal: str, + inputs: List[Iterable[Union[str, int]]], + sender: str = ZERO_ADDRESS, + ) -> List["DryRunTransactionResult"]: + return cls._map(cls.dryrun_app, algod, teal, inputs, sender) + + @classmethod + def dryrun_logicsig_on_sequence( + cls, + algod: AlgodClient, + teal: str, + inputs: List[Iterable[Union[str, int]]], + sender: str = ZERO_ADDRESS, + ) -> List["DryRunTransactionResult"]: + return cls._map(cls.dryrun_logicsig, algod, teal, inputs, sender) + + @classmethod + def _map(cls, f, algod, teal, inps, sndr): + return list(map(lambda args: f(algod, teal, args, sender=sndr), inps)) + + @classmethod + def execute_one_dryrun( + cls, + algod: AlgodClient, + teal: str, + args: Iterable[Union[str, int]], + mode: ExecutionMode, + sender: str = ZERO_ADDRESS, + ) -> "DryRunTransactionResult": + assert ( + len(ExecutionMode) == 2 + ), f"assuming only 2 ExecutionMode's but have {len(ExecutionMode)}" + assert mode in ExecutionMode, f"unknown mode {mode} of type {type(mode)}" + is_app = mode == ExecutionMode.Application + + args = DryRunEncoder.encode_args(args) + builder = ( + DryRunHelper.singleton_app_request + if is_app + else DryRunHelper.singleton_logicsig_request + ) + dryrun_req = builder(teal, args, sender=sender) + dryrun_resp = algod.dryrun(dryrun_req) + return DryRunTransactionResult.from_single_response(dryrun_resp) + + +class DryRunTransactionResult: + """Methods to extract information from a single dry run transaction. + TODO: merge this with @barnjamin's similarly named class of PR #283 + + The class contains convenience methods and properties for inspecting + dry run execution results on a _single transaction_ and for making + assertions in tests. + + For example, let's execute a dry run for a logic sig teal program that purportedly computes $`x^2`$ + (see [lsig_square.teal](../../x/blackbox/teal/lsig_square.teal) for one such example). 
+
+    So assume you have a string `teal` containing that TEAL source and run the following:
+
+    ```python
+    >>> algod = get_algod()
+    >>> x = 9
+    >>> args = (x,)
+    >>> dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args)
+    >>> assert dryrun_result.status() == "PASS"
+    >>> assert dryrun_result.stack_top() == x ** 2
+    ```
+    In the above we have asserted that the program has successfully exited with
+    status "PASS" and that the top of the stack contained $`x^2 = 81`$.
+    The _assertable properties_ were `status()` and `stack_top()`.
+
+    DryRunTransactionResult provides the following **assertable properties**:
+    * `cost`
+        - total opcode cost utilized during execution
+        - only available for apps
+    * `last_log`
+        - the final hex bytes that were logged during execution (apps only)
+        - only available for apps
+    * `logs`
+        - similar to `last_log` but a list of _all_ the printed logs
+    * `final_scratch`
+        - the final scratch slot state contents represented as a dictionary
+        - CAVEAT: slots containing a type's zero-value (0 or "") are not reported
+    * `max_stack_height`
+        - the maximum height the stack had during execution
+    * `stack_top`
+        - the contents of the top of the stack at the end of execution
+    * `status`
+        - either "PASS" when the execution succeeded or "REJECT" otherwise
+    * `passed`
+        - shorthand for `status() == "PASS"`
+    * `rejected`
+        - shorthand for `status() == "REJECT"`
+    * `error` with optional `contains` matching
+        - when no contains is provided, returns True exactly when execution fails due to error
+        - when contains is given, only returns True if an error occurred whose message includes contains
+    * `noError`
+        - returns True if there was no error, or the actual error when an error occurred
+    """
+
+    def __init__(self, dryrun_resp: dict, txn_index: int):
+        txns = dryrun_resp.get("txns", [])
+        assert txns, "Dry Run response is missing transactions"
+
+        assert (
+            0 <= txn_index < len(txns)
+        ), f"Out of bounds txn_index {txn_index} when there are only {len(txns)} transactions in the Dry Run response"
+
+        txn = txns[txn_index]
+
+        self.mode: ExecutionMode = self.get_txn_mode(txn)
+        self.parent_dryrun_response: dict = dryrun_resp
+        self.txn: dict = txn
+        self.extracts: dict = self.extract_all(self.txn, self.is_app())
+        self.black_box_results: BlackboxResults = self.extracts["bbr"]
+
+    def is_app(self) -> bool:
+        return self.mode == ExecutionMode.Application
+
+    @classmethod
+    def get_txn_mode(cls, txn: dict) -> ExecutionMode:
+        """
+        Guess the mode based on location of traces.
If no luck, raise an AssertionError + """ + keyset = set(txn.keys()) + akey, lskey = "app-call-trace", "logic-sig-trace" + assert ( + len({akey, lskey} & keyset) == 1 + ), f"ambiguous mode for dry run transaction: expected exactly one of '{akey}', '{lskey}' to be in keyset {keyset}" + + if akey in keyset: + return ExecutionMode.Application + + return ExecutionMode.Signature + + @classmethod + def from_single_response(cls, dryrun_resp: dict) -> "DryRunTransactionResult": + txns = dryrun_resp.get("txns") or [] + assert ( + len(txns) == 1 + ), f"require exactly 1 dry run transaction to create a singleton but had {len(txns)} instead" + + return cls(dryrun_resp, 0) + + def dig(self, property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: + """Main router for assertable properties""" + txn = self.txn + bbr = self.black_box_results + + assert SequenceAssertion.mode_has_assertion( + self.mode, property + ), f"{self.mode} cannot handle dig information from txn for assertion type {property}" + + if property == DryRunProperty.cost: + return txn["cost"] + + if property == DryRunProperty.lastLog: + last_log = txn.get("logs", [None])[-1] + if last_log is None: + return last_log + return b64decode(last_log).hex() + + if property == DryRunProperty.finalScratch: + return {k: v.as_python_type() for k, v in bbr.final_scratch_state.items()} + + if property == DryRunProperty.stackTop: + trace = self.extracts["trace"] + stack = trace[-1]["stack"] + if not stack: + return None + tv = TealVal.from_scratch(stack[-1]) + return tv.as_python_type() + + if property == DryRunProperty.maxStackHeight: + return max(len(t["stack"]) for t in self.extracts["trace"]) + + if property == DryRunProperty.status: + return self.extracts["status"] + + if property == DryRunProperty.passed: + return self.extracts["status"] == "PASS" + + if property == DryRunProperty.rejected: + return self.extracts["status"] == "REJECT" + + if property == DryRunProperty.error: + contains = kwargs.get("contains") + ok, msg = assert_error( + self.parent_dryrun_response, contains=contains, enforce=False + ) + # when there WAS an error, we return its msg, else False + return ok + + if property == DryRunProperty.errorMessage: + _, msg = assert_no_error(self.parent_dryrun_response, enforce=False) + # when there was no error, we return None, else return its msg + return msg if msg else None + + raise Exception(f"Unknown assert_type {property}") + + def cost(self) -> Optional[int]: + """Assertable property for the total opcode cost that was used during dry run execution + return type: int + available Mode: Application only + """ + return self.dig(DRProp.cost) if self.is_app() else None + + def last_log(self) -> Optional[str]: + """Assertable property for the last log that was printed during dry run execution + return type: string representing the hex bytes of the final log + available Mode: Application only + """ + return self.dig(DRProp.lastLog) if self.is_app() else None + + def logs(self) -> Optional[List[str]]: + """Assertable property for all the logs that were printed during dry run execution + return type: list of strings representing hex bytes of the logs + available Mode: Application only + """ + return self.extracts["logs"] + + def final_scratch(self) -> Dict[int, Union[int, str]]: + """Assertable property for the scratch slots and their contents at the end of dry run execution + return type: dictionary from strings to int's or strings + available: all modes + CAVEAT: slots containing a type's zero-value (0 or "") are not reported + """ + return 
self.dig(DRProp.finalScratch) + + def max_stack_height(self) -> int: + """Assertable property for the maximum height the stack had during a dry run execution + return type: int + available: all modes + """ + return self.dig(DRProp.maxStackHeight) + + def stack_top(self) -> Union[int, str]: + """Assertable property for the contents of the top of the stack and the end of a dry run execution + return type: int or string + available: all modes + """ + return self.dig(DRProp.stackTop) + + def status(self) -> str: + """Assertable property for the program run status at the end of dry run execution + return type: string (either "PASS" or "REJECT") + available: all modes + """ + return self.dig(DRProp.status) + + def passed(self) -> bool: + """Assertable property for the program's dry run execution having SUCCEEDED + return type: bool + available: all modes + """ + return self.dig(DRProp.passed) + + def rejected(self) -> bool: + """Assertable property for the program's dry run execution having FAILED + return type: bool + available: all modes + """ + return self.dig(DRProp.rejected) + + def error(self, contains=None) -> bool: + """Assertable property for a program having failed during dry run execution due to an error. + The optional `contains` parameter allows specifying a particular string + expected to be a _substring_ of the error's message. In case the program errors, but + the contains did not match the actual error, False is returned. + return type: bool + available: all modes + """ + return self.dig(DRProp.error, contains=contains) + + def error_message(self) -> Union[bool, str]: + """Assertable property for the error message that a program produces. + return type: None (in the case of no error) or string with the error message, in case of error + available: all modes + """ + return self.dig(DRProp.errorMessage) + + def messages(self) -> List[str]: + return self.extracts["messages"] + + def last_message(self) -> Optional[str]: + return self.messages()[-1] if self.messages() else None + + def local_deltas(self) -> dict: + return self.extracts["ldeltas"] + + def global_delta(self) -> dict: + return self.extracts["gdelta"] + + def tabulate( + self, + col_max: int, + scratch_verbose: bool = False, + scratch_before_stack: bool = True, + ): + """Produce a string that when printed shows the evolution of a dry run. + + This is similar to DryrunTestCaseMixin's `pprint()` but also includes scratch + variable evolution. 
+ + For example, calling `tabulate()` with default values produces something like: + + step | PC# | L# | Teal | Scratch | Stack + --------+-------+------+------------------------+-----------+---------------------- + 1 | 1 | 1 | #pragma version 6 | | [] + 2 | 4 | 2 | txna ApplicationArgs 0 | | [0x0000000000000002] + 3 | 5 | 3 | btoi | | [2] + 4 | 17 | 11 | label1: | | [2] + 5 | 19 | 12 | store 0 | 0->2 | [] + 6 | 21 | 13 | load 0 | | [2] + 7 | 23 | 14 | pushint 2 | | [2, 2] + 8 | 24 | 15 | exp | | [4] + 9 | 8 | 4 | callsub label1 | | [4] + 10 | 10 | 5 | store 1 | 1->4 | [] + 11 | 12 | 6 | load 1 | | [4] + 12 | 13 | 7 | itob | | [0x0000000000000004] + 13 | 14 | 8 | log | | [] + 14 | 16 | 9 | load 1 | | [4] + 15 | 25 | 16 | retsub | | [4] + """ + assert not ( + scratch_verbose and scratch_before_stack + ), "Cannot request scratch columns before stack when verbose" + bbr = self.black_box_results + + def empty_hack(se): + return se if se else [""] + + rows = [ + list( + map( + str, + [ + i + 1, + bbr.program_counters[i], + bbr.teal_line_numbers[i], + bbr.teal_source_lines[i], + bbr.stack_evolution[i], + *empty_hack(bbr.scratch_evolution[i]), + ], + ) + ) + for i in range(bbr.steps_executed) + ] + if col_max and col_max > 0: + rows = [[x[:col_max] for x in row] for row in rows] + headers = [ + "step", + "PC#", + "L#", + "Teal", + "Stack", + *([f"S@{s}" for s in bbr.slots_used] if scratch_verbose else ["Scratch"]), + ] + if scratch_before_stack: + # with assertion above, we know that there is only one + # scratch column and it's at the very end + headers[-1], headers[-2] = headers[-2], headers[-1] + for i in range(len(rows)): + rows[i][-1], rows[i][-2] = rows[i][-2], rows[i][-1] + + table = tabulate(rows, headers=headers, tablefmt="presto") + return table + + def report(self, args: Iterable[Union[str, int]], msg: str, row: int = 0) -> str: + bbr = self.black_box_results + return f"""=============== + <<<<<<<<<<<{msg}>>>>>>>>>>> + =============== + App Trace: + {self.tabulate(-1)} + =============== + MODE: {self.mode} + TOTAL COST: {self.cost()} + =============== + FINAL MESSAGE: {self.last_message()} + =============== + Messages: {self.messages()} + Logs: {self.logs()} + =============== + -----{bbr}----- + TOTAL STEPS: {bbr.steps()} + FINAL STACK: {bbr.final_stack()} + FINAL STACK TOP: {bbr.final_stack_top()} + MAX STACK HEIGHT: {bbr.max_stack_height()} + FINAL SCRATCH: {bbr.final_scratch()} + SLOTS USED: {bbr.slots()} + FINAL AS ROW: {bbr.final_as_row()} + =============== + Global Delta: + {self.global_delta()} + =============== + Local Delta: + {self.local_deltas()} + =============== + TXN AS ROW: {self.csv_row(row, args)} + =============== + <<<<<<<<<<<{msg}>>>>>>>>>>> + =============== + """ + + def csv_row( + self, row_num: int, args: Iterable[Union[int, str]] + ) -> Dict[str, Union[str, int]]: + return { + " Run": row_num, + " cost": self.cost(), + # back-tick needed to keep Excel/Google sheets from stumbling over hex + " last_log": f"`{self.last_log()}", + " final_message": self.last_message(), + " Status": self.status(), + **self.black_box_results.final_as_row(), + **{f"Arg_{i:02}": arg for i, arg in enumerate(args)}, + } + + @classmethod + def csv_report( + cls, inputs: List[tuple], dr_resps: List["DryRunTransactionResult"] + ) -> str: + """Produce a Comma Separated Values report string capturing important statistics + for a sequence of dry runs. 
+ + For example, assuming you have a string `teal` which is a TEAL program computing $`x^2`$ + such as this [example program](x/blackbox/teal/app_square.teal). + Let's run some Exploratory Dry Run Analysis (EDRA) for $`x`$ in the range $`[0, 10]`$: + + ```python + >>> algod = get_algod() + >>> inputs = [(x,) for x in range(11)] # [(0), (1), ... , (10)] + >>> dryrun_results = DryRunExecutor.dryrun_app_on_sequence(algod, teal, inputs) + >>> csv = DryRunTransactionResult.csv_report(inputs, dryrun_results) + >>> print(csv) + ``` + Then you would get the following output: + ```plain + Run, Status, cost, final_message, last_log, top_of_stack,Arg_00,max_stack_height,s@000,s@001,steps + 1,REJECT,14,REJECT,`None,0,0,2,,,15 + 2,PASS,14,PASS,`0000000000000001,1,1,2,1,1,15 + 3,PASS,14,PASS,`0000000000000004,4,2,2,2,4,15 + 4,PASS,14,PASS,`0000000000000009,9,3,2,3,9,15 + 5,PASS,14,PASS,`0000000000000010,16,4,2,4,16,15 + 6,PASS,14,PASS,`0000000000000019,25,5,2,5,25,15 + 7,PASS,14,PASS,`0000000000000024,36,6,2,6,36,15 + 8,PASS,14,PASS,`0000000000000031,49,7,2,7,49,15 + 9,PASS,14,PASS,`0000000000000040,64,8,2,8,64,15 + 10,PASS,14,PASS,`0000000000000051,81,9,2,9,81,15 + ``` + """ + N = len(inputs) + assert N == len( + dr_resps + ), f"cannot produce CSV with unmatching size of inputs ({len(inputs)}) v. drresps ({len(dr_resps)})" + + dr_resps = [resp.csv_row(i + 1, inputs[i]) for i, resp in enumerate(dr_resps)] + with io.StringIO() as csv_str: + fields = sorted(set().union(*(txn.keys() for txn in dr_resps))) + writer = csv.DictWriter(csv_str, fieldnames=fields) + writer.writeheader() + for txn in dr_resps: + writer.writerow(txn) + + return csv_str.getvalue() + + @classmethod + def extract_logs(cls, txn): + return [b64decode(log).hex() for log in txn.get("logs", [])] + + @classmethod + def extract_cost(cls, txn): + return txn.get("cost") + + @classmethod + def extract_status(cls, txn, is_app: bool): + key, idx = ("app-call-messages", 1) if is_app else ("logic-sig-messages", 0) + return txn[key][idx] + + @classmethod + def extract_messages(cls, txn, is_app): + return txn["app-call-messages" if is_app else "logic-sig-messages"] + + @classmethod + def extract_local_deltas(cls, txn): + return txn.get("local-deltas", []) + + @classmethod + def extract_global_delta(cls, txn): + return txn.get("global-delta", []) + + @classmethod + def extract_lines(cls, txn, is_app): + return txn["disassembly" if is_app else "logic-sig-disassembly"] + + @classmethod + def extract_trace(cls, txn, is_app): + return txn["app-call-trace" if is_app else "logic-sig-trace"] + + @classmethod + def extract_all(cls, txn: dict, is_app: bool) -> dict: + result = { + "logs": cls.extract_logs(txn), + "cost": cls.extract_cost(txn), + "status": cls.extract_status(txn, is_app), + "messages": cls.extract_messages(txn, is_app), + "ldeltas": cls.extract_local_deltas(txn), + "gdelta": cls.extract_global_delta(txn), + "lines": cls.extract_lines(txn, is_app), + "trace": cls.extract_trace(txn, is_app), + } + + result["bbr"] = BlackboxResults.scrape(result["trace"], result["lines"]) + + return result + + +class SequenceAssertion: + """Enable asserting invariants on a sequence of dry run executions""" + + def __init__( + self, + predicate: Union[Dict[Tuple, Union[str, int]], Callable], + enforce: bool = False, + name: str = None, + ): + self.definition = predicate + self.predicate, self._expected = self.prepare_predicate(predicate) + self.enforce = enforce + self.name = name + + def __repr__(self): + return f"SequenceAssertion({self.definition})"[:100] + + 
def __call__(self, args: list, actual: Union[str, int]) -> Tuple[bool, str]:
+        assertion = self.predicate(args, actual)
+        msg = ""
+        if not assertion:
+            msg = f"SequenceAssertion for '{self.name}' failed for args {args}: actual is [{actual}] BUT expected [{self.expected(args)}]"
+            if self.enforce:
+                assert assertion, msg
+
+        return assertion, msg
+
+    def expected(self, args: list) -> Union[str, int]:
+        return self._expected(args)
+
+    def dryrun_assert(
+        self,
+        inputs: List[list],
+        dryrun_results: List["DryRunTransactionResult"],
+        assert_type: DryRunProperty,
+    ):
+        N = len(inputs)
+        assert N == len(
+            dryrun_results
+        ), f"inputs (len={N}) and dryrun responses (len={len(dryrun_results)}) must have the same length"
+
+        assert isinstance(
+            assert_type, DryRunProperty
+        ), f"assertion types must be DryRunProperty's but got [{assert_type}] which is a {type(assert_type)}"
+
+        for i, args in enumerate(inputs):
+            res = dryrun_results[i]
+            actual = res.dig(assert_type)
+            ok, msg = self(args, actual)
+            assert ok, res.report(args, msg, row=i + 1)
+
+    @classmethod
+    def prepare_predicate(cls, predicate):
+        if isinstance(predicate, dict):
+            return (
+                lambda args, actual: predicate[args] == actual,
+                lambda args: predicate[args],
+            )
+
+        if not isinstance(predicate, Callable):
+            # constant function in this case:
+            return lambda _, actual: predicate == actual, lambda _: predicate
+
+        try:
+            sig = signature(predicate)
+        except Exception as e:
+            raise Exception(
+                f"callable predicate {predicate} must have a signature"
+            ) from e
+
+        N = len(sig.parameters)
+        assert N in (1, 2), f"predicate has the wrong number of parameters {N}"
+
+        if N == 2:
+            return predicate, lambda _: predicate
+
+        # N == 1:
+        return lambda args, actual: predicate(args) == actual, lambda args: predicate(
+            args
+        )
+
+    @classmethod
+    def mode_has_assertion(
+        cls, mode: ExecutionMode, assertion_type: DryRunProperty
+    ) -> bool:
+        missing = {
+            ExecutionMode.Signature: {
+                DryRunProperty.cost,
+                DryRunProperty.lastLog,
+            },
+            ExecutionMode.Application: set(),
+        }
+        if assertion_type in missing[mode]:
+            return False
+
+        return True
+
+    @classmethod
+    def inputs_and_assertions(
+        cls, scenario: Dict[str, Union[list, dict]], mode: ExecutionMode
+    ) -> Tuple[List[tuple], Dict[DRProp, Any]]:
+        """
+        Validate that a Blackbox Test Scenario has been properly constructed, and return back
+        its components which consist of **inputs** and _optional_ **assertions**.
+
+        A scenario should adhere to the following schema:
+        ```
+        {
+            "inputs": List[Tuple[Union[str, int], ...]],
+            "assertions": Dict[DryRunProperty, ...an assertion...]
+        }
+
+        Each assertion is a map from _assertion type_ to be made on a dry run,
+        to the actual assertion. Actual assertions can be:
+        * simple python types - these are useful in the case of _constant_ assertions.
+            For example, if you want to assert that the `maxStackHeight` is 3, just use `3`.
+        * dictionaries of type Dict[Tuple, Any] - these are useful when you just want to assert
+            a discrete set of input-output pairs.
+            For example, if you have 4 inputs that you want to assert are being squared,
+            you could use `{(2,): 4, (7,): 49, (13,): 169, (11,): 121}`
+        * functions which take a single variable. These are useful when you have a python "simulator"
+            for the assertions.
+            In the square example you could use `lambda args: args[0]**2`
+        * functions which take _two_ variables. These are useful when your assertion is more
+            subtle than out-and-out equality.
For example, suppose you want to assert that the + `cost` of the dry run is `2*n` plus/minus 5 where `n` is the first arg of the input. Then + you could use `lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` + ``` + """ + assert isinstance( + scenario, dict + ), f"a Blackbox Scenario should be a dict but got a {type(scenario)}" + + inputs = scenario.get("inputs") + # TODO: we can be more flexible here and allow arbitrary iterable `args`. Because + # assertions are allowed to be dicts, and therefore each `args` needs to be + # hashable in that case, we are restricting to tuples currently. + # However, this function could be friendlier and just _convert_ each of the + # `args` to a tuple, thus eliminating any downstream issues. + assert ( + inputs + and isinstance(inputs, list) + and all(isinstance(args, tuple) for args in inputs) + ), "need a list of inputs with at least one args and all args must be tuples" + + assertions = scenario.get("assertions", {}) + if assertions: + assert isinstance(assertions, dict), f"assertions must be a dict" + + for key in assertions: + assert isinstance(key, DRProp) and SequenceAssertion.mode_has_assertion( + mode, key + ), f"each key must be a DryrunAssertionTypes appropriate to {mode}. This is not the case for key {key}" + + return inputs, assertions diff --git a/blackbox/dryrun.py b/blackbox/dryrun.py new file mode 100644 index 00000000..0829a81d --- /dev/null +++ b/blackbox/dryrun.py @@ -0,0 +1,947 @@ +import base64 +import binascii +import string +from dataclasses import dataclass +import pytest +from typing import List, Union + +from algosdk.constants import payment_txn, appcall_txn +from algosdk.future import transaction +from algosdk.encoding import encode_address, msgpack_encode +from algosdk.v2client.models import ( + DryrunRequest, + DryrunSource, + Application, + ApplicationParams, + ApplicationStateSchema, + Account, + TealKeyValue, +) + + +ZERO_ADDRESS = encode_address(bytes(32)) +PRINTABLE = frozenset(string.printable) + + +@dataclass +class LSig: + """Logic Sig program parameters""" + + args: List[bytes] = None + + +@dataclass +class App: + """Application program parameters""" + + creator: str = ZERO_ADDRESS + round: int = None + app_idx: int = 0 + on_complete: int = 0 + args: List[bytes] = None + accounts: List[Union[str, Account]] = None + global_state: List[TealKeyValue] = None + + +#### LIGHTWEIGHT ASSERTIONS FOR RE-USE #### +def _msg_if(msg): + return "" if msg is None else f": {msg}" + + +def _fail(msg): + assert False, msg + + +def _assert_in(status, msgs, msg=None, enforce=True): + ok = status in msgs + result = None + if not ok: + result = f"{status} should be in {msgs}" + _msg_if(msg) + if enforce: + assert status in msgs, result + + return ok, result + + +def assert_pass(txn_index, msg, txns_res): + assert_status("PASS", txn_index, msg, txns_res) + + +def assert_reject(txn_index, msg, txns_res): + assert_status("REJECT", txn_index, msg, txns_res) + + +def assert_status(status, txn_index, msg, txns_res, enforce=True): + if txn_index is not None and (txn_index < 0 or txn_index >= len(txns_res)): + _fail(f"txn index {txn_index} is out of range [0, {len(txns_res)})") + + assert_all = True + all_msgs = [] + if status == "REJECT": + assert_all = False + + for idx, txn_res in enumerate(txns_res): + # skip if txn_index is set + if txn_index is not None and idx != txn_index: + continue + + msgs = [] + if ( + "logic-sig-messages" in txn_res + and txn_res["logic-sig-messages"] is not None + and len(txn_res["logic-sig-messages"]) > 
0 + ): + msgs = txn_res["logic-sig-messages"] + elif ( + "app-call-messages" in txn_res + and txn_res["app-call-messages"] is not None + and len(txn_res["app-call-messages"]) > 0 + ): + msgs = txn_res["app-call-messages"] + else: + _fail("no messages from dryrun") + if assert_all or idx == txn_index: + _assert_in(status, msgs, msg=msg) + all_msgs.extend(msgs) + + if not assert_all: + return _assert_in(status, all_msgs, msg=msg, enforce=enforce) + + return True, None + + +def assert_error(drr, contains=None, txn_index=None, msg=None, enforce=True): + error = Helper.find_error(drr, txn_index=txn_index) + ok = bool(error) + result = None + if not ok: # the expected error did NOT occur + result = f"expected truthy error but got {error}" + _msg_if(msg) + if enforce: + assert error, result + return ok, result + # got here? Must have error + if contains is not None: + return _assert_in(contains, error, enforce=enforce) + + return True, None + + +def assert_no_error(drr, txn_index=None, msg=None, enforce=True): + error = Helper.find_error(drr, txn_index=txn_index) + ok = not bool(error) + result = None + if not ok: + result = f"{msg}: {error}" + _msg_if(msg) + if enforce: + assert not error, result + + return ok, result + + +def assert_global_state_contains(delta_value, txn_index, txns_res, msg=None): + if txn_index is not None and (txn_index < 0 or txn_index >= len(txns_res)): + _fail(f"txn index {txn_index} is out of range [0, {len(txns_res)})") + + found = False + all_global_deltas = [] + for idx, txn_res in enumerate(txns_res): + # skip if txn_index is set + if txn_index is not None and idx != txn_index: + continue + if ( + "global-delta" in txn_res + and txn_res["global-delta"] is not None + and len(txn_res["global-delta"]) > 0 + ): + found = Helper.find_delta_value( + txn_res["global-delta"], delta_value + ) + if not found and idx == txn_index: + msg = ( + msg + if msg is not None + else f"{delta_value} not found in {txn_res['global-delta']}" + ) + _fail(msg) + if found: + break + all_global_deltas.extend(txn_res["global-delta"]) + elif idx == txn_index: + _fail("no global state from dryrun") + + if not found: + msg = ( + msg + if msg is not None + else f"{delta_value} not found in any of {all_global_deltas}" + ) + _fail(msg) + + +def assert_local_state_contains( + addr, delta_value, txn_index, txns_res, msg=None +): + if txn_index is not None and (txn_index < 0 or txn_index >= len(txns_res)): + _fail(f"txn index {txn_index} is out of range [0, {len(txns_res)})") + + found = False + all_local_deltas = [] + for idx, txn_res in enumerate(txns_res): + # skip if txn_index is set + if txn_index is not None and idx != txn_index: + continue + if ( + "local-deltas" in txn_res + and txn_res["local-deltas"] is not None + and len(txn_res["local-deltas"]) > 0 + ): + for local_delta in txn_res["local-deltas"]: + addr_found = False + if local_delta["address"] == addr: + addr_found = True + found = Helper.find_delta_value( + local_delta["delta"], delta_value + ) + if not found and idx == txn_index: + msg = ( + msg + if msg is not None + else f"{delta_value} not found in {local_delta['delta']}" + ) + _fail(msg) + if found: + break + all_local_deltas.extend(local_delta["delta"]) + if not addr_found and idx == txn_index: + _fail(f"no address {addr} in local states from dryrun") + elif idx == txn_index: + _fail("no local states from dryrun") + + if not found: + msg = ( + msg + if msg is not None + else f"{delta_value} not found in any of {all_local_deltas}" + ) + _fail(msg) + + +class DryrunTestCaseMixin: + 
""" + Mixin class for unittest.TestCase + + Expects self.algo_client to be initialized in TestCase.setUp + """ + + def assertPass( + self, + prog_drr_txns, + lsig=None, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that all programs pass. + By default it uses logic sig mode with args passed in lsig object. + If app is set then application call is made + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + txn_index (int): txn result index to assert in + + Raises: + unittest.TestCase.failureException: if not passed + TypeError: program is not bytes or str + """ + txns_res = self._checked_request(prog_drr_txns, lsig, app, sender) + assert_pass(txn_index, msg, txns_res) + + def assertReject( + self, + prog_drr_txns, + lsig=None, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts any program is rejected. + By default it uses logic sig mode with args passed in lsig object. + If app is set then application call is made + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + txn_index (int): txn result index to assert in + + Raises: + unittest.TestCase.failureException: if not passed + TypeError: program is not bytes or str + """ + txns_res = self._checked_request(prog_drr_txns, lsig, app, sender) + assert_reject(txn_index, msg, txns_res) + + def assertStatus( + self, + prog_drr_txns, + status, + lsig=None, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that program completes with the status. + By default it uses logic sig mode with args passed in lsig object. + If app is set then application call is made + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + status (str): status to assert + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + txn_index (int): txn result index to assert in + + Raises: + unittest.TestCase.failureException (AssetionException): if not passed + TypeError: program is not bytes or str + """ + txns_res = self._checked_request(prog_drr_txns, lsig, app, sender) + assert_status(status, txn_index, msg, txns_res) + + def assertNoError( + self, + prog_drr_txns, + lsig=None, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that there are no errors. + for example, compilation errors or application state initialization errors. + By default it uses logic sig mode with args passed in lsig object. 
+ If app is set then application call is made + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + txn_index (int): txn result index to assert in + + Raises: + unittest.TestCase.failureException (AssetionException): if not passed + TypeError: program is not bytes or str + """ + drr = self._dryrun_request(prog_drr_txns, lsig, app, sender) + assert_no_error(drr, txn_index=txn_index, msg=msg) + + def assertError( + self, + prog_drr_txns, + contains=None, + lsig=None, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that there are no errors. + for example, compilation errors or application state initialization errors. + By default it uses logic sig mode with args passed in lsig object. + If app is set then application call is made + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + txn_index (int): txn result index to assert in + + Raises: + unittest.TestCase.failureException (AssetionException): if not passed + TypeError: program is not bytes or str + """ + + drr = self._dryrun_request(prog_drr_txns, lsig, app, sender) + assert_error(drr, contains=contains, txn_index=txn_index, msg=msg) + + def assertGlobalStateContains( + self, + prog_drr_txns, + delta_value, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that execution of the program has this global delta value + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + delta_value (dict): value to assert + + Raises: + unittest.TestCase.failureException: if not passed + TypeError: program is not bytes or str + """ + + txns_res = self._checked_request( + prog_drr_txns, lsig=None, app=app, sender=sender + ) + assert_global_state_contains(delta_value, txn_index, txns_res, msg=msg) + + def assertLocalStateContains( + self, + prog_drr_txns, + addr, + delta_value, + app=None, + sender=ZERO_ADDRESS, + txn_index=None, + msg=None, + ): + """ + Asserts that execution of the program has this global delta value + + Args: + prog_drr_txns (bytes, str, dict, list): program to run, dryrun response object or list of transactions + addr (str): account + delta_value (dict): value to assert + + Raises: + unittest.TestCase.failureException: if not passed + TypeError: program is not bytes or str + """ + + txns_res = self._checked_request( + prog_drr_txns, lsig=None, app=app, sender=sender + ) + assert_local_state_contains( + addr, delta_value, txn_index, txns_res, msg=msg + ) + + def dryrun_request( + self, program, lsig=None, app=None, sender=ZERO_ADDRESS + ): + """ + Helper function for creation DryrunRequest and making the REST request + from program source or compiled bytes + + Args: + program (bytes, str): program to use as a source + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + sender (str): txn sender + + Returns: + dict: dryrun response object + + Raises: + TypeError: program is not bytes or str + """ + drr = Helper.build_dryrun_request(program, lsig, app, sender) + return self.algo_client.dryrun(drr) + + def dryrun_request_from_txn(self, txns, app): + 
""" + Helper function for creation DryrunRequest and making the REST request + + Args: + txns (list): list of transaction to run as a group + app (dict, App): app program additional parameters. Only app.round and app.accounts are used. + + Returns: + dict: dryrun response object + + Raises: + TypeError: program is not bytes or str + """ + + if app is not None: + if not isinstance(app, App) and not isinstance(app, dict): + raise ValueError("app must be a dict or App") + if isinstance(app, dict): + app = App(**app) + + rnd = None + accounts = None + apps = [] + if app is not None: + if app.round is not None: + rnd = app.round + if app.accounts is not None: + accounts = app.accounts + for acc in accounts: + if acc.created_apps: + apps.extend(acc.created_apps) + + drr = DryrunRequest( + txns=txns, + accounts=accounts, + round=rnd, + apps=apps, + ) + return self.algo_client.dryrun(drr) + + @staticmethod + def default_address(): + """Helper function returning default zero addr""" + return ZERO_ADDRESS + + def _dryrun_request(self, prog_drr_txns, lsig, app, sender): + """ + Helper function to make a dryrun request + """ + if isinstance(prog_drr_txns, dict): + drr = prog_drr_txns + elif isinstance(prog_drr_txns, list): + drr = self.dryrun_request_from_txn(prog_drr_txns, app) + else: + drr = self.dryrun_request(prog_drr_txns, lsig, app, sender) + return drr + + def _checked_request( + self, prog_drr_txns, lsig=None, app=None, sender=ZERO_ADDRESS + ): + """ + Helper function to make a dryrun request and perform basic validation + """ + drr = self._dryrun_request(prog_drr_txns, lsig, app, sender) + if drr["error"]: + _fail(f"error in dryrun response: {drr['error']}") + + if not drr["txns"]: + _fail("empty response from dryrun") + + return drr["txns"] + + +class Helper: + """Utility functions for dryrun""" + + @classmethod + def singleton_logicsig_request( + cls, program: str, args: List[bytes], sender=ZERO_ADDRESS + ): + return cls.build_dryrun_request( + program, lsig=LSig(args=args), sender=sender + ) + + @classmethod + def singleton_app_request( + cls, program: str, args: List[bytes], sender=ZERO_ADDRESS + ): + return cls.build_dryrun_request( + program, app=App(args=args), sender=sender + ) + + @classmethod + def build_dryrun_request( + cls, program, lsig=None, app=None, sender=ZERO_ADDRESS + ): + """ + Helper function for creation DryrunRequest object from a program. 
+ By default it uses logic sig mode + and if app_idx / on_complete are set then application call is made + + Args: + program (bytes, string): program to use as a source + lsig (dict, LSig): logic sig program additional parameters + app (dict, App): app program additional parameters + + Returns: + DryrunRequest: dryrun request object + + Raises: + TypeError: program is not bytes or str + ValueError: both lsig and app parameters provided or unknown type + """ + + if lsig is not None and app is not None: + raise ValueError("both lsig and app not supported") + + if app and not isinstance(app, (App, dict)): + raise ValueError("app must be a dict or App") + + if lsig and not isinstance(lsig, (LSig, dict)): + raise ValueError("lsig must be a dict or LSig") + + if not isinstance(program, (bytes, str)): + raise TypeError("program must be bytes or str") + + run_mode = cls._get_run_mode(app) + + app_or_lsig = ( + cls._prepare_lsig(lsig) + if run_mode == "lsig" + else cls._prepare_app(app) + ) + + del app + del lsig + + txn = ( + cls.sample_txn(sender, payment_txn) + if run_mode == "lsig" + else cls.sample_txn(sender, appcall_txn) + ) + + if isinstance(program, str): + return ( + cls._prepare_lsig_source_request( + program, app_or_lsig, run_mode, txn + ) + if run_mode == "lsig" + else cls._prepare_app_source_request( + program, app_or_lsig, sender, run_mode, txn + ) + ) + + # in case of bytes: + sources = [] + apps = [] + accounts = [] + rnd = None + + if run_mode != "lsig": + txns = [cls._build_appcall_signed_txn(txn, app_or_lsig)] + application = cls.sample_app(sender, app_or_lsig, program) + apps = [application] + accounts = app_or_lsig.accounts + rnd = app_or_lsig.round + else: + txns = [cls._build_logicsig_txn(program, txn, app_or_lsig)] + + return DryrunRequest( + txns=txns, + sources=sources, + apps=apps, + accounts=accounts, + round=rnd, + ) + + @classmethod + def _get_run_mode(cls, app): + run_mode = "lsig" + if app is not None: + on_complete = ( + app.get("on_complete") + if isinstance(app, dict) + else app.on_complete + ) + run_mode = ( + "clearp" + if on_complete == transaction.OnComplete.ClearStateOC + else "approv" + ) + return run_mode + + @classmethod + def _prepare_app(cls, app): + if isinstance(app, dict): + app = App(**app) + + if app.app_idx is None: + app.app_idx = 0 + + if app.accounts is not None: + accounts = [] + for acc in app.accounts: + if isinstance(acc, str): + accounts.append( + Account( + address=acc, + ) + ) + else: + accounts.append(acc) + app.accounts = accounts + + return app + + @classmethod + def _prepare_lsig(cls, lsig): + if lsig is None: + lsig = LSig() + elif isinstance(lsig, dict): + lsig = LSig(**lsig) + + return lsig + + @classmethod + def _prepare_lsig_source_request(cls, program, lsig, run_mode, txn): + source = DryrunSource(field_name=run_mode, source=program, txn_index=0) + apps = [] + accounts = [] + rnd = None + txns = [cls._build_logicsig_txn(program, txn, lsig)] + sources = [source] + return DryrunRequest( + txns=txns, + sources=sources, + apps=apps, + accounts=accounts, + round=rnd, + ) + + @classmethod + def _prepare_app_source_request(cls, program, app, sender, run_mode, txn): + source = DryrunSource(field_name=run_mode, source=program, txn_index=0) + txns = [cls._build_appcall_signed_txn(txn, app)] + application = cls.sample_app(sender, app) + apps = [application] + accounts = app.accounts + # app idx must match in sources and in apps arrays so dryrun find apps sources + source.app_index = application.id + rnd = app.round + sources = [source] + 
return DryrunRequest( + txns=txns, + sources=sources, + apps=apps, + accounts=accounts, + round=rnd, + ) + + @staticmethod + def _build_logicsig_txn(program, txn, lsig): + """ + Helper function to make LogicSigTransaction + """ + # replacing program with an empty one is OK since it set by source + # LogicSig does not like None/invalid programs because of validation + program = program if isinstance(program, bytes) else b"\x01" + logicsig = transaction.LogicSig(program, lsig.args) + return transaction.LogicSigTransaction(txn, logicsig) + + @staticmethod + def _build_appcall_signed_txn(txn, app): + """ + Helper function to make SignedTransaction + """ + txn.index = app.app_idx + txn.on_complete = app.on_complete + txn.app_args = app.args + if app.accounts is not None: + txn.accounts = [a.address for a in app.accounts] + return transaction.SignedTransaction(txn, None) + + @classmethod + def sample_txn(cls, sender, txn_type): + """ + Helper function for creation Transaction for dryrun + """ + sp = transaction.SuggestedParams( + int(1000), int(1), int(100), "", flat_fee=True + ) + if txn_type == payment_txn: + txn = transaction.Transaction( + sender, sp, None, None, payment_txn, None + ) + elif txn_type == appcall_txn: + txn = transaction.ApplicationCallTxn(sender, sp, 0, 0) + else: + raise ValueError("unsupported src object") + return txn + + @staticmethod + def sample_app(sender, app, program=None): + """ + Helper function for creation Application description for dryrun + """ + default_app_id = 1380011588 + # dryrun ledger can't stand app idx = 0 + # and requires some non-zero if even for app create txn + if app.app_idx == 0: + creator = sender + idx = default_app_id + else: + idx = app.app_idx + creator = app.creator + params = ApplicationParams( + creator=creator, + local_state_schema=ApplicationStateSchema(64, 64), + global_state_schema=ApplicationStateSchema(64, 64), + global_state=app.global_state, + ) + + if app.on_complete == transaction.OnComplete.ClearStateOC: + params.clear_state_program = program + else: + params.approval_program = program + + return Application(idx, params) + + @staticmethod + def _guess(value): + try: + value = base64.b64decode(value) + except binascii.Error: + return value + + try: + all_print = True + for b in value: + if chr(b) not in PRINTABLE: + all_print = False + if all_print: + return '"' + value.decode("utf8") + '"' + else: + if len(value) == 32: # address? hash? 
+ return f"{encode_address(value)} ({value.hex()})" + elif len(value) < 16: # most likely bin number + return "0x" + value.hex() + return value.hex() + except UnicodeDecodeError: + return value.hex() + + @classmethod + def _format_stack(cls, stack): + parts = [] + for item in stack: + if item["type"] == 1: # bytes + item = cls._guess(item["bytes"]) + else: + item = str(item["uint"]) + parts.append(item) + return " ".join(parts) + + @classmethod + def pprint(cls, drr): + """Helper function to pretty print dryrun response""" + if "error" in drr and drr["error"]: + print("error:", drr["error"]) + if "txns" not in drr or not isinstance(drr["txns"], list): + return + + for idx, txn_res in enumerate(drr["txns"]): + msgs = [] + trace = [] + try: + msgs = txn_res["app-call-messages"] + trace = txn_res["app-call-trace"] + except KeyError: + try: + msgs = txn_res["logic-sig-messages"] + trace = txn_res["logic-sig-trace"] + except KeyError: + pass + if msgs: + print(f"txn[{idx}] messages:") + for msg in msgs: + print(msg) + if trace: + print(f"txn[{idx}] trace:") + for item in trace: + dis = txn_res["disassembly"][item["line"]] + stack = cls._format_stack(item["stack"]) + line = "{:4d}".format(item["line"]) + pc = "{:04d}".format(item["pc"]) + disasm = "{:25}".format(dis) + stack_line = "{}".format(stack) + result = f"{line} ({pc}): {disasm} [{stack_line}]" + if "error" in item: + result += f" error: {item['error']}" + print(result) + + @staticmethod + def find_error(drr, txn_index=None): + """ + Helper function to find error in dryrun response + """ + try: + if len(drr["error"]) > 0: + return drr["error"] + except (KeyError, TypeError): + pass + if "txns" in drr and isinstance(drr["txns"], list): + if txn_index is not None and ( + txn_index < 0 or txn_index >= len(drr["txns"]) + ): + return f"txn index {txn_index} is out of range [0, {len(drr['txns'])})" + + for idx, txn_res in enumerate(drr["txns"]): + if txn_index is not None and txn_index != idx: + continue + try: + ptype = "app" + trace = txn_res["app-call-trace"] + except KeyError: + try: + ptype = "logic" + trace = txn_res["logic-sig-trace"] + except KeyError: + continue + + for item in trace: + if "error" in item: + error = f"{ptype} {idx} failed at line {item['line']}: {item['error']}" + return error + + @staticmethod + def build_bytes_delta_value(value): + if isinstance(value, str): + value = value.encode("utf-8") + return dict( + action=1, # set bytes + bytes=base64.b64encode(value).decode( + "utf-8" + ), # b64 input to string + ) + + @staticmethod + def find_delta_value(deltas, delta_value): + found = False + for delta in deltas: + try: + if delta["key"] == delta_value["key"]: + value = delta["value"] + if value["action"] == delta_value["value"]["action"]: + if "uint" in delta_value["value"]: + if delta_value["value"]["uint"] == value["uint"]: + found = True + break + elif "bytes" in delta_value["value"]: + if delta_value["value"]["bytes"] == value["bytes"]: + found = True + break + except KeyError: + pass + return found + + @staticmethod + def save_dryrun_request(name_or_fp, req): + """Helper function to save dryrun request + + Args: + name_or_fp (str, file-like): filename or fp to save the request to + req (DryrunRequest): dryrun request object to save + """ + need_close = False + if isinstance(name_or_fp, str): + fp = open(name_or_fp, "wb") + need_close = True + else: + fp = name_or_fp + + data = msgpack_encode(req) + data = base64.b64decode(data) + + fp.write(data) + if need_close: + fp.close() From 
7a6754d681b1e577bfa285c49369a548f7030b11 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 22:48:19 -0500 Subject: [PATCH 12/85] unit-test not build-and-test --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4ae90105..f61f9be5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -63,6 +63,6 @@ jobs: with: config: ${{ env.CONFIG }} - name: Setup integration test environment - run: make pip-test build-and-test + run: make pip-test unit-test - name: Run integration tests run: make blackbox \ No newline at end of file From 2956b58f60104de64a5ea62ee15af58443afc61b Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Thu, 24 Mar 2022 23:19:50 -0500 Subject: [PATCH 13/85] .PHONY and remove unused commands --- Makefile | 12 ++---------- 1 file changed, 2 insertions(+), 10 deletions(-) diff --git a/Makefile b/Makefile index 1b8f4888..86510874 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,3 @@ -# Github Actions - -env-up: - bash -x .sandbox/sandbox up dev - -env-down: - .sandbox/sandbox down dev - -# Build - pip-publish: pip install -r requirements.txt pip install -e . @@ -25,3 +15,5 @@ blackbox: ls ./sandbox test pytest tests/integration_test.py + +.PHONY: pip-publish pip-test unit-test blackbox \ No newline at end of file From 6b739bbd8515b1ff7c3d6d9d837ec68678b75db2 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 10:08:53 -0500 Subject: [PATCH 14/85] working with act! --- Makefile | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 86510874..4270d3a7 100644 --- a/Makefile +++ b/Makefile @@ -9,11 +9,17 @@ pip-test: unit-test: pytest tests/unit_test.py -blackbox: +blackbox-smoke-test: echo "hello blackbox!" 
pwd ls - ./sandbox test + ls sandbox + cd sandbox && bash -x ./sandbox test + pwd + +integration-test: pytest tests/integration_test.py +blackbox: blackbox-smoke-test integration-test + .PHONY: pip-publish pip-test unit-test blackbox \ No newline at end of file From ac23fb9440d095738843a215dec17b1302a63e52 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 10:28:38 -0500 Subject: [PATCH 15/85] @barnjamin's solution crops up again https://github.com/algorand/sandbox/issues/64) --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 4270d3a7..75289333 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,7 @@ blackbox-smoke-test: pwd ls ls sandbox - cd sandbox && bash -x ./sandbox test + cd sandbox && script -e -c "./sandbox test" pwd integration-test: From bc37fd6a6e1a8204e084ddf8c6b298802aece92a Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 10:44:49 -0500 Subject: [PATCH 16/85] combine `script -e -c` with `bash -x` --- Makefile | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 75289333..317ffbe3 100644 --- a/Makefile +++ b/Makefile @@ -14,7 +14,8 @@ blackbox-smoke-test: pwd ls ls sandbox - cd sandbox && script -e -c "./sandbox test" + script -e -c "bash -x ./sandbox/sandbox test" + cd sandbox && docker-compose ps pwd integration-test: From a84ac01abb21f6c2c21895581022fc9a1fef8905 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 10:56:12 -0500 Subject: [PATCH 17/85] more network config info --- Makefile | 2 +- tests/integration_test.py | 7 ++++++- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 317ffbe3..1c899764 100644 --- a/Makefile +++ b/Makefile @@ -19,7 +19,7 @@ blackbox-smoke-test: pwd integration-test: - pytest tests/integration_test.py + pytest -sv tests/integration_test.py blackbox: blackbox-smoke-test integration-test diff --git a/tests/integration_test.py b/tests/integration_test.py index 5a62d86a..8b4e86b0 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -17,7 +17,12 @@ def test_algod(): - assert get_algod().status(), "somehow got nothing out of Algod's status" + algod = get_algod() + url = algod.algod_address + print(f"algod.url: {url}") + status = algod.status() + print(f"algod.status(): {status}") + assert status, "somehow got nothing out of Algod's status" # def fac_with_overflow(n): From 410187e177b6a61f4314ea2e5bfdb6f25b63b8bd Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 19:24:58 -0500 Subject: [PATCH 18/85] break up into mac and github specific commands --- .github/workflows/build.yml | 2 +- .gitignore | 1 + Makefile | 29 ++++++++++++++++++++++++----- tests/clients.py | 14 +++++--------- 4 files changed, 31 insertions(+), 15 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f61f9be5..e7790eac 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -65,4 +65,4 @@ jobs: - name: Setup integration test environment run: make pip-test unit-test - name: Run integration tests - run: make blackbox \ No newline at end of file + run: make gh-blackbox \ No newline at end of file diff --git a/.gitignore b/.gitignore index b29ff5ad..7948588c 100644 --- a/.gitignore +++ b/.gitignore @@ -2,6 +2,7 @@ # Integration tests .sandbox +sandbox # Comma Seperated Value reports *.csv diff --git a/Makefile b/Makefile index 1c899764..d3680583 100644 --- a/Makefile +++ b/Makefile @@ -1,3 +1,5 @@ 
+####### Universal ###### + pip-publish: pip install -r requirements.txt pip install -e . @@ -9,18 +11,35 @@ pip-test: unit-test: pytest tests/unit_test.py -blackbox-smoke-test: +blackbox-smoke-prefix: echo "hello blackbox!" pwd ls ls sandbox - script -e -c "bash -x ./sandbox/sandbox test" cd sandbox && docker-compose ps - pwd integration-test: pytest -sv tests/integration_test.py -blackbox: blackbox-smoke-test integration-test -.PHONY: pip-publish pip-test unit-test blackbox \ No newline at end of file +###### Mac Only ###### + +# assumes you have a symbolic link: sandbox -> /cloned/repo/algorand/sandbox +mac-sandbox-test: + ./sandbox/sandbox test + +mac-blackbox-smoke: blackbox-smoke-prefix mac-sandbox-test + +mac-blackbox: mac-blackbox-smoke integration-test + + +###### Github Actions Only ###### + +gh-sandbox-test: + script -e -c "bash -x ./sandbox/sandbox test" + +gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test + +gh-blackbox: gh-blackbox-smoke integration-test + +.PHONY: pip-publish pip-test unit-test gh-blackbox \ No newline at end of file diff --git a/tests/clients.py b/tests/clients.py index f27e1805..7b039143 100644 --- a/tests/clients.py +++ b/tests/clients.py @@ -4,9 +4,9 @@ # from algosdk.v2client.indexer import IndexerClient DEVNET_TOKEN = "a" * 64 -ALGOD_PORT = 60000 -# KMD_PORT = 60001 -# INDEXER_PORTS = range(59_996, 60_000) +ALGOD_PORT = 4001 +# KMD_PORT = 4002 +# INDEXER_PORT = 8980 def get_algod() -> AlgodClient: @@ -17,9 +17,5 @@ def get_algod() -> AlgodClient: # return KMDClient(DEVNET_TOKEN, f"http://localhost:{KMD_PORT}") -# def get_indexer(port: int) -> IndexerClient: -# assert ( -# port in INDEXER_PORTS -# ), f"port for available indexers must be in {INDEXER_PORTS} but was provided {port}" - -# return IndexerClient(DEVNET_TOKEN, f"http://localhost:{port}") +# def get_indexer() -> IndexerClient: +# return IndexerClient(DEVNET_TOKEN, f"http://localhost:{INDEXER_PORT}") From fab5acb546e045f5b9511f9b260434c43a87975b Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 19:26:47 -0500 Subject: [PATCH 19/85] should work on github as well --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d3680583..c3c3f837 100644 --- a/Makefile +++ b/Makefile @@ -14,8 +14,8 @@ unit-test: blackbox-smoke-prefix: echo "hello blackbox!" 
pwd - ls - ls sandbox + ls -l + ls -l sandbox cd sandbox && docker-compose ps integration-test: From df55a5d17c03ce3f8502e0ad1d4eb8cf6ed124ad Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 19:46:39 -0500 Subject: [PATCH 20/85] expect sandbox exit code 2 on github because missing indexer --- Makefile | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c3c3f837..5d138c60 100644 --- a/Makefile +++ b/Makefile @@ -24,6 +24,11 @@ integration-test: ###### Mac Only ###### +# assumes you've installed pipx, build and tox via: +# pip install pipx; pipx install build; pipx install tox +mac-project-build: + pyproject-build + # assumes you have a symbolic link: sandbox -> /cloned/repo/algorand/sandbox mac-sandbox-test: ./sandbox/sandbox test @@ -32,11 +37,13 @@ mac-blackbox-smoke: blackbox-smoke-prefix mac-sandbox-test mac-blackbox: mac-blackbox-smoke integration-test +mac-publish: py ###### Github Actions Only ###### gh-sandbox-test: - script -e -c "bash -x ./sandbox/sandbox test" + # expect exit code 2 on github and 0 on mac, as indexer is not present in the install but is on the typical sandbox + script -e -c "bash -x ./sandbox/sandbox test"; if $? -eq 2 ]; then echo 0; else echo $?; fi gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test From 1752366caaba31fb67ad3a43d75c1d718236e05b Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 19:59:41 -0500 Subject: [PATCH 21/85] missing left bracket --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 5d138c60..67669091 100644 --- a/Makefile +++ b/Makefile @@ -43,7 +43,7 @@ mac-publish: py gh-sandbox-test: # expect exit code 2 on github and 0 on mac, as indexer is not present in the install but is on the typical sandbox - script -e -c "bash -x ./sandbox/sandbox test"; if $? -eq 2 ]; then echo 0; else echo $?; fi + script -e -c "bash -x ./sandbox/sandbox test"; if [$? -eq 2 ]; then echo 0; else echo $?; fi gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test From 9cdb3f9ad4c440ad61593bc7613b648f5eafdb46 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 20:08:35 -0500 Subject: [PATCH 22/85] simpler no-fail sandbox test --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index 67669091..01f678b4 100644 --- a/Makefile +++ b/Makefile @@ -42,8 +42,8 @@ mac-publish: py ###### Github Actions Only ###### gh-sandbox-test: - # expect exit code 2 on github and 0 on mac, as indexer is not present in the install but is on the typical sandbox - script -e -c "bash -x ./sandbox/sandbox test"; if [$? 
-eq 2 ]; then echo 0; else echo $?; fi + # allow exit code 2 as indexer returns 500 when last-round = 0 + script -e -c "bash -x ./sandbox/sandbox test" || echo "finished ./sandbox test" gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test From b7a5e50a4c17b55b567b68903c315a90dc504912 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 21:06:31 -0500 Subject: [PATCH 23/85] mac-gh-simulate --- Makefile | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/Makefile b/Makefile index 01f678b4..3ea96e13 100644 --- a/Makefile +++ b/Makefile @@ -24,12 +24,12 @@ integration-test: ###### Mac Only ###### -# assumes you've installed pipx, build and tox via: -# pip install pipx; pipx install build; pipx install tox +# assumes installations of pipx, build and tox via: +# `pip install pipx; pipx install build; pipx install tox` mac-project-build: pyproject-build -# assumes you have a symbolic link: sandbox -> /cloned/repo/algorand/sandbox +# assumes a symbolic link: sandbox -> /cloned/repo/algorand/sandbox mac-sandbox-test: ./sandbox/sandbox test @@ -37,7 +37,10 @@ mac-blackbox-smoke: blackbox-smoke-prefix mac-sandbox-test mac-blackbox: mac-blackbox-smoke integration-test -mac-publish: py +# assumes you've installed act via `brew install act`: +mac-gh-simulate: + act + ###### Github Actions Only ###### From cfdaaf370a95de31aa946d90da329307982cdff6 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Fri, 25 Mar 2022 21:25:30 -0500 Subject: [PATCH 24/85] re-create teal_blackbox --- .gitignore | 3 + blackbox/blackbox.py | 5 +- blackbox/dryrun.py | 66 +- tests/dryrun_mixin_docs_test.py | 608 ++++++++++++++++ tests/integration_test.py | 1164 +++++++++++++++---------------- 5 files changed, 1202 insertions(+), 644 deletions(-) create mode 100644 tests/dryrun_mixin_docs_test.py diff --git a/.gitignore b/.gitignore index 7948588c..8f43dc34 100644 --- a/.gitignore +++ b/.gitignore @@ -10,6 +10,9 @@ sandbox # Emacs detritus *~ +# VS Code detritus +.vscode + ##### github recommends for Python ##### # Byte-compiled / optimized / DLL files diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index 0c29f61a..027c606b 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -8,11 +8,12 @@ from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union from algosdk.v2client.algod import AlgodClient -from algosdk.testing.dryrun import ( + +from blackbox.dryrun import ( ZERO_ADDRESS, assert_error, assert_no_error, - Helper as DryRunHelper, + DryRunHelper, ) diff --git a/blackbox/dryrun.py b/blackbox/dryrun.py index 0829a81d..66a81265 100644 --- a/blackbox/dryrun.py +++ b/blackbox/dryrun.py @@ -111,7 +111,7 @@ def assert_status(status, txn_index, msg, txns_res, enforce=True): def assert_error(drr, contains=None, txn_index=None, msg=None, enforce=True): - error = Helper.find_error(drr, txn_index=txn_index) + error = DryRunHelper.find_error(drr, txn_index=txn_index) ok = bool(error) result = None if not ok: # the expected error did NOT occur @@ -127,7 +127,7 @@ def assert_error(drr, contains=None, txn_index=None, msg=None, enforce=True): def assert_no_error(drr, txn_index=None, msg=None, enforce=True): - error = Helper.find_error(drr, txn_index=txn_index) + error = DryRunHelper.find_error(drr, txn_index=txn_index) ok = not bool(error) result = None if not ok: @@ -153,9 +153,7 @@ def assert_global_state_contains(delta_value, txn_index, txns_res, msg=None): and txn_res["global-delta"] is not None and len(txn_res["global-delta"]) > 0 ): - found = 
Helper.find_delta_value( - txn_res["global-delta"], delta_value - ) + found = DryRunHelper.find_delta_value(txn_res["global-delta"], delta_value) if not found and idx == txn_index: msg = ( msg @@ -178,9 +176,7 @@ def assert_global_state_contains(delta_value, txn_index, txns_res, msg=None): _fail(msg) -def assert_local_state_contains( - addr, delta_value, txn_index, txns_res, msg=None -): +def assert_local_state_contains(addr, delta_value, txn_index, txns_res, msg=None): if txn_index is not None and (txn_index < 0 or txn_index >= len(txns_res)): _fail(f"txn index {txn_index} is out of range [0, {len(txns_res)})") @@ -199,7 +195,7 @@ def assert_local_state_contains( addr_found = False if local_delta["address"] == addr: addr_found = True - found = Helper.find_delta_value( + found = DryRunHelper.find_delta_value( local_delta["delta"], delta_value ) if not found and idx == txn_index: @@ -431,13 +427,9 @@ def assertLocalStateContains( txns_res = self._checked_request( prog_drr_txns, lsig=None, app=app, sender=sender ) - assert_local_state_contains( - addr, delta_value, txn_index, txns_res, msg=msg - ) + assert_local_state_contains(addr, delta_value, txn_index, txns_res, msg=msg) - def dryrun_request( - self, program, lsig=None, app=None, sender=ZERO_ADDRESS - ): + def dryrun_request(self, program, lsig=None, app=None, sender=ZERO_ADDRESS): """ Helper function for creation DryrunRequest and making the REST request from program source or compiled bytes @@ -454,7 +446,7 @@ def dryrun_request( Raises: TypeError: program is not bytes or str """ - drr = Helper.build_dryrun_request(program, lsig, app, sender) + drr = DryRunHelper.build_dryrun_request(program, lsig, app, sender) return self.algo_client.dryrun(drr) def dryrun_request_from_txn(self, txns, app): @@ -515,9 +507,7 @@ def _dryrun_request(self, prog_drr_txns, lsig, app, sender): drr = self.dryrun_request(prog_drr_txns, lsig, app, sender) return drr - def _checked_request( - self, prog_drr_txns, lsig=None, app=None, sender=ZERO_ADDRESS - ): + def _checked_request(self, prog_drr_txns, lsig=None, app=None, sender=ZERO_ADDRESS): """ Helper function to make a dryrun request and perform basic validation """ @@ -531,29 +521,23 @@ def _checked_request( return drr["txns"] -class Helper: +class DryRunHelper: """Utility functions for dryrun""" @classmethod def singleton_logicsig_request( cls, program: str, args: List[bytes], sender=ZERO_ADDRESS ): - return cls.build_dryrun_request( - program, lsig=LSig(args=args), sender=sender - ) + return cls.build_dryrun_request(program, lsig=LSig(args=args), sender=sender) @classmethod def singleton_app_request( cls, program: str, args: List[bytes], sender=ZERO_ADDRESS ): - return cls.build_dryrun_request( - program, app=App(args=args), sender=sender - ) + return cls.build_dryrun_request(program, app=App(args=args), sender=sender) @classmethod - def build_dryrun_request( - cls, program, lsig=None, app=None, sender=ZERO_ADDRESS - ): + def build_dryrun_request(cls, program, lsig=None, app=None, sender=ZERO_ADDRESS): """ Helper function for creation DryrunRequest object from a program. 
By default it uses logic sig mode @@ -587,9 +571,7 @@ def build_dryrun_request( run_mode = cls._get_run_mode(app) app_or_lsig = ( - cls._prepare_lsig(lsig) - if run_mode == "lsig" - else cls._prepare_app(app) + cls._prepare_lsig(lsig) if run_mode == "lsig" else cls._prepare_app(app) ) del app @@ -603,9 +585,7 @@ def build_dryrun_request( if isinstance(program, str): return ( - cls._prepare_lsig_source_request( - program, app_or_lsig, run_mode, txn - ) + cls._prepare_lsig_source_request(program, app_or_lsig, run_mode, txn) if run_mode == "lsig" else cls._prepare_app_source_request( program, app_or_lsig, sender, run_mode, txn @@ -640,9 +620,7 @@ def _get_run_mode(cls, app): run_mode = "lsig" if app is not None: on_complete = ( - app.get("on_complete") - if isinstance(app, dict) - else app.on_complete + app.get("on_complete") if isinstance(app, dict) else app.on_complete ) run_mode = ( "clearp" @@ -746,13 +724,9 @@ def sample_txn(cls, sender, txn_type): """ Helper function for creation Transaction for dryrun """ - sp = transaction.SuggestedParams( - int(1000), int(1), int(100), "", flat_fee=True - ) + sp = transaction.SuggestedParams(int(1000), int(1), int(100), "", flat_fee=True) if txn_type == payment_txn: - txn = transaction.Transaction( - sender, sp, None, None, payment_txn, None - ) + txn = transaction.Transaction(sender, sp, None, None, payment_txn, None) elif txn_type == appcall_txn: txn = transaction.ApplicationCallTxn(sender, sp, 0, 0) else: @@ -899,9 +873,7 @@ def build_bytes_delta_value(value): value = value.encode("utf-8") return dict( action=1, # set bytes - bytes=base64.b64encode(value).decode( - "utf-8" - ), # b64 input to string + bytes=base64.b64encode(value).decode("utf-8"), # b64 input to string ) @staticmethod diff --git a/tests/dryrun_mixin_docs_test.py b/tests/dryrun_mixin_docs_test.py new file mode 100644 index 00000000..ddf16748 --- /dev/null +++ b/tests/dryrun_mixin_docs_test.py @@ -0,0 +1,608 @@ +""" +Derived from: https://github.com/algorand/docs/blob/bbd379df193399f82686e9f6d5c2bcb9d676d2d7/docs/features/asc1/teal_test.md#basic-setup-and-simple-tests +""" +import base64 +from tabulate import tabulate +import unittest + +from algosdk.constants import PAYMENT_TXN, APPCALL_TXN +from algosdk.future import transaction +from algosdk.encoding import decode_address, checksum +from algosdk.v2client.models import ( + Account, + Application, + ApplicationLocalState, + ApplicationParams, + ApplicationStateSchema, + TealKeyValue, + TealValue, +) +from blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper + +from tests.clients import get_algod + + +def b64_encode_hack(s, b=None): + if not b: + b = bytes(s, "utf-8") + return base64.b64encode(b).decode("utf-8") + + +class ExampleTestCase(DryrunTestCaseMixin, unittest.TestCase): + """The test harness consist of DryrunTestCaseMixin class that is supposed to be used as a mixin + to unittest-based user-defined tests and Helper class with various utilities. + + DryrunTestCaseMixin provides helpers for testing both LogicSig and Application smart contracts. 
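+
+    A minimal test-case skeleton (an illustrative sketch only; the concrete tests below are the
+    authoritative examples): subclass both DryrunTestCaseMixin and unittest.TestCase, and point
+    self.algo_client at an algod client in setUp():
+
+        class MyTealTest(DryrunTestCaseMixin, unittest.TestCase):
+            def setUp(self):
+                self.algo_client = get_algod()
+
+            def test_returns_one(self):
+                self.assertPass("int 1")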
+
+    ## Basic asserts: check whether the program returns true, returns false, or does [not] err during compilation or execution:
+    * assertPass
+    * assertReject
+    * assertError
+    * assertNoError
+    """
+
+    def setUp(self):
+        self.algo_client = get_algod()
+
+    def test_simple(self):
+        """ """
+        self.assertPass("int 1")
+        self.assertReject("int 0")
+        self.assertNoError("int 0")
+        self.assertError("byte 0", "1 error")
+
+    def test_logic_sig(self):
+        """Shows how to test a logic sig with parameters
+
+        This example demonstrates how to pass LogicSig arguments:
+        they need to be a list of bytes items in the args key of the lsig parameter of any assert function.
+
+        In general, passing a non-None lsig parameter forces LogicSig run mode.
+        """
+        source = """
+arg 0
+btoi
+int 0x31
+==
+"""
+        self.assertError(source, "cannot load arg[0]")
+        self.assertReject(source)
+        self.assertPass(source, lsig=dict(args=[b"1", b"2"]))
+
+        drr = self.dryrun_request(source, lsig=dict(args=[b"\x31", b"2"]))
+        self.assertPass(drr)
+
+    def test_logic_sig_ex(self):
+        """Shows how to obtain and examine a raw dryrun response"""
+        source = """
+arg 0
+btoi
+int 0x31
+==
+"""
+        drr = self.dryrun_request(source, lsig=dict(args=[b"\x31", b"2"]))
+        self.assertPass(drr)
+
+    def test_app_global_state(self):
+        """Uses the voting app as an example to check app initialization
+
+        Note the app parameter in the assert functions.
+        It allows setting application-specific fields like OnCompletion, ApplicationID, ApplicationArgs and Accounts.
+
+        The two assertReject statements at the beginning of the test check prerequisites:
+        1. the application call is made in creation mode (app_idx = 0)
+        2. the required number of initialization parameters is supplied.
+
+        Then the dryrun_request helper is used to obtain the execution result, and the written values are checked
+        with assertGlobalStateContains. Changes are reported as EvalDelta objects with key, value, action, uint or bytes
+        properties. bytes values are base64-encoded, and action is explained in the table below:
+
+        | action | description     |
+        |--------|-----------------|
+        | 1      | set bytes value |
+        | 2      | set uint value  |
+        | 3      | delete value    |
+
+        Having this information, `assertGlobalStateContains` validates that the **Creator** global key is set to the
+        txn sender address, and that **RegBegin**, **RegEnd**, **VoteBegin** and **VoteEnd** are all properly initialized.
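+
+        For example, asserting that the uint key "RegBegin" was set to 0x01 uses a delta_value of the
+        following shape (the test body below makes exactly these calls; shown here only as a sketch):
+
+            value = dict(
+                key=b64_encode_hack("RegBegin"),
+                value=dict(action=2, uint=0x01),  # action 2: set uint value
+            )
+            self.assertGlobalStateContains(drr, value)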
+ """ + source = """#pragma version 2 +int 0 +txn ApplicationID +== +bz not_creation +byte "Creator" +txn Sender +app_global_put +txn NumAppArgs +int 4 +== +bz failed +byte "RegBegin" +txna ApplicationArgs 0 +btoi +app_global_put +byte "RegEnd" +txna ApplicationArgs 1 +btoi +app_global_put +byte "VoteBegin" +txna ApplicationArgs 2 +btoi +app_global_put +byte "VoteEnd" +txna ApplicationArgs 3 +btoi +app_global_put +int 1 +return +not_creation: +int 0 +return +failed: +int 0 +return +""" + self.assertReject(source, app=dict(app_idx=0)) + self.assertReject( + source, + app=dict( + app_idx=1, + args=[b"\x01", b"\xFF", b"\x01\x00", b"\x01\xFF"], + ), + ) + self.assertPass( + source, + app=dict( + app_idx=0, + args=[b"\x01", b"\xFF", b"\x01\x00", b"\x01\xFF"], + ), + ) + + sender = "42NJMHTPFVPXVSDGA6JGKUV6TARV5UZTMPFIREMLXHETRKIVW34QFSDFRE" + drr = self.dryrun_request( + source, + sender=sender, + app=dict( + app_idx=0, + args=[ + (0x01).to_bytes(1, byteorder="big"), + (0xFF).to_bytes(1, byteorder="big"), + (0x0100).to_bytes(2, byteorder="big"), + (0x01FF).to_bytes(2, byteorder="big"), + ], + ), + ) + self.assertPass(drr) + + value = dict( + key=b64_encode_hack("Creator"), + value=dict( + action=1, + bytes=b64_encode_hack("", decode_address(sender)), + ), + ) + self.assertGlobalStateContains(drr, value) + + value = dict( + key=b64_encode_hack("RegBegin"), + value=dict(action=2, uint=0x01), + ) + self.assertGlobalStateContains(drr, value) + + value = dict(key=b64_encode_hack("RegEnd"), value=dict(action=2, uint=0xFF)) + self.assertGlobalStateContains(drr, value) + + value = dict( + key=b64_encode_hack("VoteBegin"), + value=dict(action=2, uint=0x0100), + ) + self.assertGlobalStateContains(drr, value) + + value = dict( + key=b64_encode_hack("VoteEnd"), + value=dict(action=2, uint=0x01FF), + ) + self.assertGlobalStateContains(drr, value) + + def test_app_global_state_existing(self): + """Use voting app as example to check app update + + Now let's test an application and check what does it write to the global state. + Example below is an initialization prologue of voting app. 
+ """ + source = """#pragma version 2 +int 0 +txn ApplicationID +== +bz not_creation +// fail on creation in this test scenario +int 0 +return +not_creation: +int UpdateApplication +txn OnCompletion +== +bz failed +byte "Creator" +app_global_get +txn Sender +== +bz failed +int 1 +return +failed: +int 0""" + sender = self.default_address() + + self.assertReject(source, app=dict(app_idx=0)) + self.assertReject(source, app=dict(app_idx=1)) + self.assertReject(source, app=dict(app_idx=1, accounts=[sender])) + + app = dict( + app_idx=1, + global_state=[ + TealKeyValue( + key=b64_encode_hack("Creator"), + value=TealValue(type=1, bytes=b""), + ) + ], + ) + self.assertReject(source, app=app) + + app["on_complete"] = transaction.OnComplete.UpdateApplicationOC + self.assertReject(source, app=app) + + # TODO: get this one to pass as well + # app["global_state"][0].value.bytes = decode_address(sender) + # self.assertPass(source, app=app) + + def test_app_local_state(self): + """Use voting app as example to check local state writes""" + source = """#pragma version 2 +txna ApplicationArgs 0 +byte "vote" +== +bnz vote +int 0 +return +vote: +global Round +byte "VoteBegin" +app_global_get +>= +global Round +byte "VoteEnd" +app_global_get +<= +&& +bz failed +int 0 +txn ApplicationID +app_opted_in +bz failed +int 0 +txn ApplicationID +byte "voted" +app_local_get_ex +bnz voted +//read existing vote candidate +txna ApplicationArgs 1 +app_global_get +bnz increment_existing +pop +int 0 +increment_existing: +int 1 ++ +store 1 +txna ApplicationArgs 1 +load 1 +app_global_put +int 0 //sender +byte "voted" +txna ApplicationArgs 1 +app_local_put +int 1 +return +voted: +pop +int 1 +return +failed: +int 0 +return +""" + drr = self.dryrun_request(source, app=dict(app_idx=1)) + self.assertReject(drr) + self.assertError(drr, "invalid ApplicationArgs index 0") + + drr = self.dryrun_request(source, app=dict(app_idx=1, args=[b"vote"])) + self.assertReject(drr) + self.assertNoError(drr) + + sender = "42NJMHTPFVPXVSDGA6JGKUV6TARV5UZTMPFIREMLXHETRKIVW34QFSDFRE" + creator = "DFPKC2SJP3OTFVJFMCD356YB7BOT4SJZTGWLIPPFEWL3ZABUFLTOY6ILYE" + creator_data = Account( + address=creator, + status="Offline", + created_apps=[ + Application( + id=1, + params=ApplicationParams( + global_state=[ + TealKeyValue( + key=b64_encode_hack("VoteBegin"), + value=TealValue(type=2, uint=1), + ), + TealKeyValue( + key=b64_encode_hack("VoteEnd"), + value=TealValue(type=2, uint=1000), + ), + ] + ), + ), + ], + ) + + accounts = [creator_data] + + drr = self.dryrun_request( + source, + app=dict( + app_idx=1, + args=[b"vote"], + round=3, + creator=creator, + accounts=accounts, + ), + ) + self.assertReject(drr) + self.assertNoError(drr) + + sender_data = Account( + address=sender, + status="Offline", + apps_local_state=[ApplicationLocalState(id=1)], + ) + + accounts = [creator_data, sender_data] + drr = self.dryrun_request( + source, + sender=sender, + app=dict( + app_idx=1, + creator=creator, + args=[b"vote"], + round=3, + accounts=accounts, + ), + ) + self.assertError(drr, "invalid ApplicationArgs index 1") + + accounts = [creator_data, sender_data] + drr = self.dryrun_request( + source, + sender=sender, + app=dict( + app_idx=1, + creator=creator, + args=[b"vote", "test"], + round=3, + accounts=accounts, + ), + ) + self.assertPass(drr) + DryRunHelper.pprint(drr) + + value = dict( + key=b64_encode_hack("voted"), + value=DryRunHelper.build_bytes_delta_value("test"), + ) + self.assertLocalStateContains(drr, sender, value) + + value = 
dict(key=b64_encode_hack("test"), value=dict(action=2, uint=1)) + self.assertGlobalStateContains(drr, value) + + def test_transactions(self): + """Test app call and logic sig transactions interaction + INTERESTING logic sig USE CASE + + Although examples above provide testing tools for "create and run" scenarios when + testing single programs, sometimes transaction interactions also need to tested. + In this example we consider how stateful application can __offload__ some computations + to stateless logicsig program, and ensure the logic sig is the right one. + + Suppose our logic sig program computes the following hash: + h(h(a1) + h(a2) + h(a3) + h(a4)) + where + is string concatenation, and then verifies it against some provided proof. + + Suppose our application approves only if the calculation is made correctly. + To achieve this, create a txn group where: + + 1. Txn 1 is an escrow logic sig txn with hash calculation approval program (see logic_source below). + 2. Txn 2 is app call txn with txn 1 checker (see app_source below). + * Ensure txn 1 sender is know-ahead escrow address. + * Ensure txn 1 Note field is set to proof that is needed to be confirmed. + 3. Input data a1, a2, a3, a4 are set as ApplicationArgs for txn 2 and accessed from txn 1 + (logic sig args can be used as well, since both the logic hash and the proof checked in the app call program). + + """ + logic_source = """#pragma version 2 +gtxna 1 ApplicationArgs 0 +sha512_256 +gtxna 1 ApplicationArgs 1 +sha512_256 +concat +gtxna 1 ApplicationArgs 2 +sha512_256 +gtxna 1 ApplicationArgs 3 +sha512_256 +concat +concat +sha512_256 +txn Note +== +""" + # compile the logic sig program + logic_compiled = self.algo_client.compile(logic_source) + self.assertIn("hash", logic_compiled) + self.assertIn("result", logic_compiled) + logic = base64.b64decode(logic_compiled["result"]) + logic_hash = logic_compiled["hash"] + + # compute proof from parameters + args = [b"this", b"is", b"a", b"test"] + parts = [] + for arg in args: + parts.append(checksum(arg)) + + proof = checksum(b"".join(parts)) + + # create and compile app call program + app_source = f"""#pragma version 2 +gtxn 0 Sender +addr {logic_hash} +== +gtxn 0 Note +byte {"0x" + proof.hex()} +== +&& +""" + app_compiled = self.algo_client.compile(app_source) + self.assertIn("result", app_compiled) + app = base64.b64decode(app_compiled["result"]) + + # create transactions + txn1 = DryRunHelper.sample_txn(logic_hash, PAYMENT_TXN) + txn1.note = proof + logicsig = transaction.LogicSig(logic, None) + stxn1 = transaction.LogicSigTransaction(txn1, logicsig) + + app_idx = 1 + txn2 = DryRunHelper.sample_txn(self.default_address(), APPCALL_TXN) + txn2.index = app_idx + txn2.app_args = args + stxn2 = transaction.SignedTransaction(txn2, None) + + # create a balance record with the application + # creator address is a random one + creator = "DFPKC2SJP3OTFVJFMCD356YB7BOT4SJZTGWLIPPFEWL3ZABUFLTOY6ILYE" + creator_data = Account( + address=creator, + status="Offline", + created_apps=[ + Application( + id=1, + params=ApplicationParams( + approval_program=app, + local_state_schema=ApplicationStateSchema(64, 64), + global_state_schema=ApplicationStateSchema(64, 64), + ), + ) + ], + ) + + drr = self.dryrun_request_from_txn( + [stxn1, stxn2], app=dict(accounts=[creator_data]) + ) + self.assertPass(drr) + + # now check the verification logic + # wrong creator + txn1.sender = creator + drr = self.dryrun_request_from_txn( + [stxn1, stxn2], app=dict(accounts=[creator_data]) + ) + self.assertPass(drr, 
txn_index=0) + self.assertReject(drr, txn_index=1) + self.assertReject(drr) + + # wrong proof + txn1.sender = logic_hash + txn1.note = b"wrong" + drr = self.dryrun_request_from_txn( + [stxn1, stxn2], app=dict(accounts=[creator_data]) + ) + self.assertReject(drr, txn_index=0) + self.assertReject(drr, txn_index=1) + self.assertReject(drr) + + def test_factorial(self): + """ + Shows how to test the same code as a logic sig or an app + python -m unittest x.blackbox.dryrun_mixin_docs_test.ExampleTestCase.test_factorial + """ + source = """#pragma version 6 +{} 0 +btoi +callsub oldfac_0 +return + +// oldfac +oldfac_0: +store 0 +load 0 +int 2 +< +bnz oldfac_0_l2 +load 0 +load 0 +int 1 +- +load 0 +swap +callsub oldfac_0 +swap +store 0 +* +b oldfac_0_l3 +oldfac_0_l2: +int 1 +oldfac_0_l3: +retsub""" + + def tb(i): + return i.to_bytes(1, "big") + + lsig_src = source.format("arg") + app_src = source.format("txna ApplicationArgs") + + max_arg_before_overflow = 20 + finalgood_args = None + for i in range(max_arg_before_overflow): + finalgood_args = {"args": [tb(i)]} + lsig_dr = self.dryrun_request(lsig_src, lsig=finalgood_args) + self.assertPass(lsig_dr, msg=f"i={i}") + app_dr = self.dryrun_request(app_src, app=finalgood_args) + self.assertPass(app_dr, msg=f"i={i}") + + print(f"n={1+max_arg_before_overflow} was TOO BIG:") + toobig_args = {"args": [tb(1 + max_arg_before_overflow)]} + self.assertError(lsig_src, "overflow", lsig=toobig_args) + lsig_dr = self.dryrun_request(lsig_src, lsig=toobig_args) + DryRunHelper.pprint(lsig_dr) + + self.assertError(app_src, "overflow", app=toobig_args) + app_dr = self.dryrun_request(app_src, app=toobig_args) + DryRunHelper.pprint(app_dr) + + print("\n" * 3, f"BUT... n={1+max_arg_before_overflow} is JUST FINE:") + lsig_dr = self.dryrun_request(lsig_src, lsig=finalgood_args) + DryRunHelper.pprint(lsig_dr) + + app_dr = self.dryrun_request(app_src, app=finalgood_args) + DryRunHelper.pprint(app_dr) + + print("FINISHED logic sig", "\n" * 3, "BEGIN app") + + print("HOW ABOUT costs?") + + def get_cost(i): + return self.dryrun_request(app_src, app={"args": [tb(i)]})["txns"][0][ + "cost" + ] + + print( + tabulate([(i, get_cost(i)) for i in range(45)], headers=["n", "Cost(n)"]), + ) diff --git a/tests/integration_test.py b/tests/integration_test.py index 8b4e86b0..9f1ed6a9 100644 --- a/tests/integration_test.py +++ b/tests/integration_test.py @@ -2,19 +2,19 @@ import pytest -from algosdk.testing.dryrun import Helper as DryRunHelper - -# from algosdk.testing.teal_blackbox import ( -# DryRunEncoder as Encoder, -# DryRunExecutor as Executor, -# DryRunProperty as DRProp, -# DryRunTransactionResult as DRR, -# ExecutionMode, -# SequenceAssertion, -# ) +from blackbox.blackbox import ( + DryRunEncoder as Encoder, + DryRunExecutor as Executor, + DryRunProperty as DRProp, + DryRunTransactionResult as DRR, + ExecutionMode, + SequenceAssertion, +) from tests.clients import get_algod +TESTS_DIR = Path.cwd() / "tests" + def test_algod(): algod = get_algod() @@ -25,588 +25,562 @@ def test_algod(): assert status, "somehow got nothing out of Algod's status" -# def fac_with_overflow(n): -# if n < 2: -# return 1 -# if n > 20: -# return 2432902008176640000 -# return n * fac_with_overflow(n - 1) - - -# def fib(n): -# a, b = 0, 1 -# for _ in range(n): -# a, b = b, a + b -# return a - - -# def fib_cost(args): -# cost = 17 -# for n in range(1, args[0] + 1): -# cost += 31 * fib(n - 1) -# return cost - - -# def test_singleton_assertions(): -# algod = get_algod() -# algod_status = algod.status() -# assert 
algod_status - -# teal_fmt = """#pragma version 6 -# {} 0 -# btoi -# callsub square_0 -# {} -# return - -# // square -# square_0: -# store 0 -# load 0 -# pushint 2 // 2 -# exp -# retsub""" - -# teal_app, teal_lsig = list( -# map(lambda s: teal_fmt.format(s, ""), ["txna ApplicationArgs", "arg"]) -# ) - -# teal_app_log, bad_teal_lsig = list( -# map( -# lambda s: teal_fmt.format( -# s, -# """store 1 -# load 1 -# itob -# log -# load 1""", -# ), -# ["txna ApplicationArgs", "arg"], -# ) -# ) - -# x = 9 -# args = [x] - -# app_res, app_log_res = list( -# map( -# lambda teal: Executor.dryrun_app(algod, teal, args), -# [teal_app, teal_app_log], -# ) -# ) -# lsig_res, bad_lsig_res = list( -# map( -# lambda teal: Executor.dryrun_logicsig(algod, teal, args), -# [teal_lsig, bad_teal_lsig], -# ) -# ) - -# assert isinstance(app_res, DRR) -# assert isinstance(app_log_res, DRR) -# assert isinstance(lsig_res, DRR) -# assert isinstance(bad_lsig_res, DRR) - -# assert app_res.mode == ExecutionMode.Application -# assert app_log_res.mode == ExecutionMode.Application -# assert lsig_res.mode == ExecutionMode.Signature -# assert bad_lsig_res.mode == ExecutionMode.Signature - -# def prop_assert(dr_resp, actual, expected): -# assert expected == actual, dr_resp.report( -# args, f"expected {expected} but got {actual}" -# ) - -# prop_assert(app_res, app_res.cost(), 9) -# prop_assert(app_log_res, app_log_res.cost(), 14) -# prop_assert(lsig_res, lsig_res.cost(), None) - -# prop_assert(app_res, app_res.last_log(), None) -# prop_assert( -# app_log_res, app_log_res.last_log(), (x ** 2).to_bytes(8, "big").hex() -# ) -# prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x ** 2)) -# prop_assert(lsig_res, lsig_res.last_log(), None) - -# prop_assert(app_res, app_res.final_scratch(), {0: x}) -# prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x ** 2}) -# prop_assert(lsig_res, lsig_res.final_scratch(), {0: x}) -# prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x ** 2}) - -# prop_assert(app_res, app_res.stack_top(), x ** 2) -# prop_assert(app_log_res, app_log_res.stack_top(), x ** 2) -# prop_assert(lsig_res, lsig_res.stack_top(), x ** 2) -# prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x ** 2)) - -# prop_assert(app_res, app_res.max_stack_height(), 2) -# prop_assert(app_log_res, app_log_res.max_stack_height(), 2) -# prop_assert(lsig_res, lsig_res.max_stack_height(), 2) -# prop_assert(bad_lsig_res, bad_lsig_res.max_stack_height(), 2) - -# prop_assert(app_res, app_res.status(), "PASS") -# prop_assert(app_log_res, app_log_res.status(), "PASS") -# prop_assert(lsig_res, lsig_res.status(), "PASS") -# prop_assert(bad_lsig_res, bad_lsig_res.status(), "REJECT") - -# prop_assert(app_res, app_res.passed(), True) -# prop_assert(app_log_res, app_log_res.passed(), True) -# prop_assert(lsig_res, lsig_res.passed(), True) -# prop_assert(bad_lsig_res, bad_lsig_res.passed(), False) - -# prop_assert(app_res, app_res.rejected(), False) -# prop_assert(app_log_res, app_log_res.rejected(), False) -# prop_assert(lsig_res, lsig_res.rejected(), False) -# prop_assert(bad_lsig_res, bad_lsig_res.rejected(), True) - -# prop_assert(app_res, app_res.error(), False) -# prop_assert(app_log_res, app_log_res.error(), False) -# prop_assert(lsig_res, lsig_res.error(), False) -# prop_assert(bad_lsig_res, bad_lsig_res.error(), True) -# assert bad_lsig_res.error( -# contains="logic 0 failed at line 7: log not allowed in current mode" -# ) -# prop_assert( -# bad_lsig_res, bad_lsig_res.error(contains="log not 
allowed"), True -# ) -# prop_assert( -# bad_lsig_res, bad_lsig_res.error(contains="WRONG PATTERN"), False -# ) - -# prop_assert(app_res, app_res.error_message(), None) -# prop_assert(app_log_res, app_log_res.error_message(), None) -# prop_assert(lsig_res, lsig_res.error_message(), None) -# assert ( -# "logic 0 failed at line 7: log not allowed in current mode" -# in bad_lsig_res.error_message() -# ) - - -# APP_SCENARIOS = { -# "app_exp": { -# "inputs": [()], -# # since only a single input, just assert a constant in each case -# "assertions": { -# DRProp.cost: 11, -# DRProp.lastLog: Encoder.hex(2 ** 10), -# # dicts have a special meaning as assertions. So in the case of "finalScratch" -# # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around -# DRProp.finalScratch: lambda _: {0: 2 ** 10}, -# DRProp.stackTop: 2 ** 10, -# DRProp.maxStackHeight: 2, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "app_square_byref": { -# "inputs": [(i,) for i in range(100)], -# "assertions": { -# DRProp.cost: lambda _, actual: 20 < actual < 22, -# DRProp.lastLog: Encoder.hex(1337), -# # due to dry-run artifact of not reporting 0-valued scratchvars, -# # we have a special case for n=0: -# DRProp.finalScratch: lambda args, actual: ( -# {2, 1337, (args[0] ** 2 if args[0] else 2)} -# ).issubset(set(actual.values())), -# DRProp.stackTop: 1337, -# DRProp.maxStackHeight: 3, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "app_square": { -# "inputs": [(i,) for i in range(100)], -# "assertions": { -# DRProp.cost: 14, -# DRProp.lastLog: { -# # since execution REJECTS for 0, expect last log for this case to be None -# (i,): Encoder.hex(i * i) if i else None -# for i in range(100) -# }, -# DRProp.finalScratch: lambda args: ( -# {0: args[0], 1: args[0] ** 2} if args[0] else {} -# ), -# DRProp.stackTop: lambda args: args[0] ** 2, -# DRProp.maxStackHeight: 2, -# DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", -# DRProp.passed: lambda i: i[0] > 0, -# DRProp.rejected: lambda i: i[0] == 0, -# DRProp.errorMessage: None, -# }, -# }, -# "app_swap": { -# "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], -# "assertions": { -# DRProp.cost: 27, -# DRProp.lastLog: Encoder.hex(1337), -# DRProp.finalScratch: lambda args: { -# 0: 4, -# 1: 5, -# 2: Encoder.hex0x(args[0]), -# 3: 1337, -# 4: Encoder.hex0x(args[1]), -# 5: Encoder.hex0x(args[0]), -# }, -# DRProp.stackTop: 1337, -# DRProp.maxStackHeight: 2, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "app_string_mult": { -# "inputs": [("xyzw", i) for i in range(100)], -# "assertions": { -# DRProp.cost: lambda args: 30 + 15 * args[1], -# DRProp.lastLog: ( -# lambda args: Encoder.hex(args[0] * args[1]) -# if args[1] -# else None -# ), -# # due to dryrun 0-scratchvar artifact, special case for i == 0: -# DRProp.finalScratch: lambda args: ( -# { -# 0: 5, -# 1: args[1], -# 2: args[1] + 1, -# 3: Encoder.hex0x(args[0]), -# 4: Encoder.hex0x(args[0] * args[1]), -# 5: Encoder.hex0x(args[0] * args[1]), -# } -# if args[1] -# else { -# 0: 5, -# 2: args[1] + 1, -# 3: Encoder.hex0x(args[0]), -# } -# ), -# DRProp.stackTop: lambda args: len(args[0] * args[1]), -# DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, -# DRProp.status: lambda args: ( -# "PASS" if 0 < args[1] < 45 else "REJECT" -# ), -# DRProp.passed: lambda args: 0 < 
args[1] < 45, -# DRProp.rejected: lambda args: 0 >= args[1] or args[1] >= 45, -# DRProp.errorMessage: None, -# }, -# }, -# "app_oldfac": { -# "inputs": [(i,) for i in range(25)], -# "assertions": { -# DRProp.cost: lambda args, actual: ( -# actual - 40 <= 17 * args[0] <= actual + 40 -# ), -# DRProp.lastLog: lambda args: ( -# Encoder.hex(fac_with_overflow(args[0])) -# if args[0] < 21 -# else None -# ), -# DRProp.finalScratch: lambda args: ( -# {0: args[0], 1: fac_with_overflow(args[0])} -# if 0 < args[0] < 21 -# else ( -# {0: min(21, args[0])} -# if args[0] -# else {1: fac_with_overflow(args[0])} -# ) -# ), -# DRProp.stackTop: lambda args: fac_with_overflow(args[0]), -# DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), -# DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", -# DRProp.passed: lambda args: args[0] < 21, -# DRProp.rejected: lambda args: args[0] >= 21, -# DRProp.errorMessage: lambda args, actual: ( -# actual is None if args[0] < 21 else "overflowed" in actual -# ), -# }, -# }, -# "app_slow_fibonacci": { -# "inputs": [(i,) for i in range(18)], -# "assertions": { -# DRProp.cost: lambda args: ( -# fib_cost(args) if args[0] < 17 else 70_000 -# ), -# DRProp.lastLog: lambda args: ( -# Encoder.hex(fib(args[0])) if 0 < args[0] < 17 else None -# ), -# DRProp.finalScratch: lambda args, actual: ( -# actual == {0: args[0], 1: fib(args[0])} -# if 0 < args[0] < 17 -# else (True if args[0] >= 17 else actual == {}) -# ), -# # we declare to "not care" about the top of the stack for n >= 17 -# DRProp.stackTop: lambda args, actual: ( -# actual == fib(args[0]) if args[0] < 17 else True -# ), -# # similarly, we don't care about max stack height for n >= 17 -# DRProp.maxStackHeight: lambda args, actual: ( -# actual == max(2, 2 * args[0]) if args[0] < 17 else True -# ), -# DRProp.status: lambda args: "PASS" -# if 0 < args[0] < 8 -# else "REJECT", -# DRProp.passed: lambda args: 0 < args[0] < 8, -# DRProp.rejected: lambda args: 0 >= args[0] or args[0] >= 8, -# DRProp.errorMessage: lambda args, actual: ( -# actual is None -# if args[0] < 17 -# else "dynamic cost budget exceeded" in actual -# ), -# }, -# }, -# } - - -# @pytest.mark.parametrize("filebase", APP_SCENARIOS.keys()) -# def test_app_with_report(filebase: str): -# mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase] - -# # 0. Validate that the scenarios are well defined: -# inputs, assertions = SequenceAssertion.inputs_and_assertions( -# scenario, mode -# ) - -# algod = get_algod() - -# # 1. Read the TEAL from ./test/integration/teal/*.teal -# path = Path.cwd() / "test" / "integration" / "teal" -# case_name = filebase -# tealpath = path / f"{filebase}.teal" -# with open(tealpath, "r") as f: -# teal = f.read() - -# print( -# f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: -# ------- -# {teal} -# -------""" -# ) - -# # 2. Run the requests to obtain sequence of Dryrun responses: -# dryrun_results = Executor.dryrun_app_on_sequence(algod, teal, inputs) - -# # 3. Generate statistical report of all the runs: -# csvpath = path / f"{filebase}.csv" -# with open(csvpath, "w") as f: -# f.write(DRR.csv_report(inputs, dryrun_results)) - -# print(f"Saved Dry Run CSV report to {csvpath}") - -# # 4. Sequential assertions (if provided any) -# for i, type_n_assertion in enumerate(assertions.items()): -# assert_type, assertion = type_n_assertion - -# assert SequenceAssertion.mode_has_assertion( -# mode, assert_type -# ), f"assert_type {assert_type} is not applicable for {mode}. 
Please REMOVE or MODIFY" - -# assertion = SequenceAssertion( -# assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" -# ) -# print( -# f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" -# ) -# assertion.dryrun_assert(inputs, dryrun_results, assert_type) - - -# # NOTE: logic sig dry runs are missing some information when compared with app dry runs. -# # Therefore, certain assertions don't make sense for logic sigs explaining why some of the below are commented out: -# LOGICSIG_SCENARIOS = { -# "lsig_exp": { -# "inputs": [()], -# "assertions": { -# # DRA.cost: 11, -# # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), -# DRProp.finalScratch: lambda _: {}, -# DRProp.stackTop: 2 ** 10, -# DRProp.maxStackHeight: 2, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "lsig_square_byref": { -# "inputs": [(i,) for i in range(100)], -# "assertions": { -# # DRA.cost: lambda _, actual: 20 < actual < 22, -# # DRA.lastLog: lightly_encode_output(1337, logs=True), -# # due to dry-run artifact of not reporting 0-valued scratchvars, -# # we have a special case for n=0: -# DRProp.finalScratch: lambda args: ( -# {0: args[0] ** 2} if args[0] else {} -# ), -# DRProp.stackTop: 1337, -# DRProp.maxStackHeight: 3, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "lsig_square": { -# "inputs": [(i,) for i in range(100)], -# "assertions": { -# # DRA.cost: 14, -# # DRA.lastLog: {(i,): lightly_encode_output(i * i, logs=True) if i else None for i in range(100)}, -# DRProp.finalScratch: lambda args: ( -# {0: args[0]} if args[0] else {} -# ), -# DRProp.stackTop: lambda args: args[0] ** 2, -# DRProp.maxStackHeight: 2, -# DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", -# DRProp.passed: lambda i: i[0] > 0, -# DRProp.rejected: lambda i: i[0] == 0, -# DRProp.errorMessage: None, -# }, -# }, -# "lsig_swap": { -# "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], -# "assertions": { -# # DRA.cost: 27, -# # DRA.lastLog: lightly_encode_output(1337, logs=True), -# DRProp.finalScratch: lambda args: { -# 0: Encoder.hex0x(args[1]), -# 1: Encoder.hex0x(args[0]), -# 3: 1, -# 4: Encoder.hex0x(args[0]), -# }, -# DRProp.stackTop: 1337, -# DRProp.maxStackHeight: 2, -# DRProp.status: "PASS", -# DRProp.passed: True, -# DRProp.rejected: False, -# DRProp.errorMessage: None, -# }, -# }, -# "lsig_string_mult": { -# "inputs": [("xyzw", i) for i in range(100)], -# "assertions": { -# # DRA.cost: lambda args: 30 + 15 * args[1], -# # DRA.lastLog: lambda args: lightly_encode_output(args[0] * args[1]) if args[1] else None, -# DRProp.finalScratch: lambda args: ( -# { -# 0: Encoder.hex0x(args[0] * args[1]), -# 2: args[1], -# 3: args[1] + 1, -# 4: Encoder.hex0x(args[0]), -# } -# if args[1] -# else { -# 3: args[1] + 1, -# 4: Encoder.hex0x(args[0]), -# } -# ), -# DRProp.stackTop: lambda args: len(args[0] * args[1]), -# DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, -# DRProp.status: lambda args: "PASS" if args[1] else "REJECT", -# DRProp.passed: lambda args: bool(args[1]), -# DRProp.rejected: lambda args: not bool(args[1]), -# DRProp.errorMessage: None, -# }, -# }, -# "lsig_oldfac": { -# "inputs": [(i,) for i in range(25)], -# "assertions": { -# # DRA.cost: lambda args, actual: actual - 40 <= 17 * args[0] <= actual + 40, -# # DRA.lastLog: lambda args, actual: (actual is None) or (int(actual, base=16) == fac_with_overflow(args[0])), -# 
DRProp.finalScratch: lambda args: ( -# {0: min(args[0], 21)} if args[0] else {} -# ), -# DRProp.stackTop: lambda args: fac_with_overflow(args[0]), -# DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), -# DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", -# DRProp.passed: lambda args: args[0] < 21, -# DRProp.rejected: lambda args: args[0] >= 21, -# DRProp.errorMessage: lambda args, actual: ( -# actual is None -# if args[0] < 21 -# else "logic 0 failed at line 21: * overflowed" in actual -# ), -# }, -# }, -# "lsig_slow_fibonacci": { -# "inputs": [(i,) for i in range(18)], -# "assertions": { -# # DRA.cost: fib_cost, -# # DRA.lastLog: fib_last_log, -# # by returning True for n >= 15, we're declaring that we don't care about the scratchvar's for such cases: -# DRProp.finalScratch: lambda args, actual: ( -# actual == {0: args[0]} -# if 0 < args[0] < 15 -# else (True if args[0] else actual == {}) -# ), -# DRProp.stackTop: lambda args, actual: ( -# actual == fib(args[0]) if args[0] < 15 else True -# ), -# DRProp.maxStackHeight: lambda args, actual: ( -# actual == max(2, 2 * args[0]) if args[0] < 15 else True -# ), -# DRProp.status: lambda args: "PASS" -# if 0 < args[0] < 15 -# else "REJECT", -# DRProp.passed: lambda args: 0 < args[0] < 15, -# DRProp.rejected: lambda args: not (0 < args[0] < 15), -# DRProp.errorMessage: lambda args, actual: ( -# actual is None -# if args[0] < 15 -# else "dynamic cost budget exceeded" in actual -# ), -# }, -# }, -# } - - -# @pytest.mark.parametrize("filebase", LOGICSIG_SCENARIOS.keys()) -# def test_logicsig_with_report(filebase: str): -# mode, scenario = ExecutionMode.Signature, LOGICSIG_SCENARIOS[filebase] - -# # 0. Validate that the scenarios are well defined: -# inputs, assertions = SequenceAssertion.inputs_and_assertions( -# scenario, mode -# ) - -# algod = get_algod() - -# # 1. Read the TEAL from ./test/integration/teal/*.teal -# path = Path.cwd() / "test" / "integration" / "teal" -# case_name = filebase -# tealpath = path / f"{filebase}.teal" -# with open(tealpath, "r") as f: -# teal = f.read() - -# print( -# f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: -# ------- -# {teal} -# -------""" -# ) - -# # 2. Run the requests to obtain sequence of Dryrun resonses: -# dryrun_results = Executor.dryrun_logicsig_on_sequence(algod, teal, inputs) - -# # 3. Generate statistical report of all the runs: -# csvpath = path / f"{filebase}.csv" -# with open(csvpath, "w") as f: -# f.write(DRR.csv_report(inputs, dryrun_results)) - -# print(f"Saved Dry Run CSV report to {csvpath}") - -# # 4. Sequential assertions (if provided any) -# for i, type_n_assertion in enumerate(assertions.items()): -# assert_type, assertion = type_n_assertion - -# assert SequenceAssertion.mode_has_assertion( -# mode, assert_type -# ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE of MODIFY" - -# assertion = SequenceAssertion( -# assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" -# ) -# print( -# f"{i+1}. 
Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" -# ) -# assertion.dryrun_assert(inputs, dryrun_results, assert_type) +def fac_with_overflow(n): + if n < 2: + return 1 + if n > 20: + return 2432902008176640000 + return n * fac_with_overflow(n - 1) + + +def fib(n): + a, b = 0, 1 + for _ in range(n): + a, b = b, a + b + return a + + +def fib_cost(args): + cost = 17 + for n in range(1, args[0] + 1): + cost += 31 * fib(n - 1) + return cost + + +def test_singleton_assertions(): + algod = get_algod() + algod_status = algod.status() + assert algod_status + + teal_fmt = """#pragma version 6 +{} 0 +btoi +callsub square_0 +{} +return + +// square +square_0: +store 0 +load 0 +pushint 2 // 2 +exp +retsub""" + + teal_app, teal_lsig = list( + map(lambda s: teal_fmt.format(s, ""), ["txna ApplicationArgs", "arg"]) + ) + + teal_app_log, bad_teal_lsig = list( + map( + lambda s: teal_fmt.format( + s, + """store 1 +load 1 +itob +log +load 1""", + ), + ["txna ApplicationArgs", "arg"], + ) + ) + + x = 9 + args = [x] + + app_res, app_log_res = list( + map( + lambda teal: Executor.dryrun_app(algod, teal, args), + [teal_app, teal_app_log], + ) + ) + lsig_res, bad_lsig_res = list( + map( + lambda teal: Executor.dryrun_logicsig(algod, teal, args), + [teal_lsig, bad_teal_lsig], + ) + ) + + assert isinstance(app_res, DRR) + assert isinstance(app_log_res, DRR) + assert isinstance(lsig_res, DRR) + assert isinstance(bad_lsig_res, DRR) + + assert app_res.mode == ExecutionMode.Application + assert app_log_res.mode == ExecutionMode.Application + assert lsig_res.mode == ExecutionMode.Signature + assert bad_lsig_res.mode == ExecutionMode.Signature + + def prop_assert(dr_resp, actual, expected): + assert expected == actual, dr_resp.report( + args, f"expected {expected} but got {actual}" + ) + + prop_assert(app_res, app_res.cost(), 9) + prop_assert(app_log_res, app_log_res.cost(), 14) + prop_assert(lsig_res, lsig_res.cost(), None) + + prop_assert(app_res, app_res.last_log(), None) + prop_assert(app_log_res, app_log_res.last_log(), (x**2).to_bytes(8, "big").hex()) + prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x**2)) + prop_assert(lsig_res, lsig_res.last_log(), None) + + prop_assert(app_res, app_res.final_scratch(), {0: x}) + prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x**2}) + prop_assert(lsig_res, lsig_res.final_scratch(), {0: x}) + prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x**2}) + + prop_assert(app_res, app_res.stack_top(), x**2) + prop_assert(app_log_res, app_log_res.stack_top(), x**2) + prop_assert(lsig_res, lsig_res.stack_top(), x**2) + prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x**2)) + + prop_assert(app_res, app_res.max_stack_height(), 2) + prop_assert(app_log_res, app_log_res.max_stack_height(), 2) + prop_assert(lsig_res, lsig_res.max_stack_height(), 2) + prop_assert(bad_lsig_res, bad_lsig_res.max_stack_height(), 2) + + prop_assert(app_res, app_res.status(), "PASS") + prop_assert(app_log_res, app_log_res.status(), "PASS") + prop_assert(lsig_res, lsig_res.status(), "PASS") + prop_assert(bad_lsig_res, bad_lsig_res.status(), "REJECT") + + prop_assert(app_res, app_res.passed(), True) + prop_assert(app_log_res, app_log_res.passed(), True) + prop_assert(lsig_res, lsig_res.passed(), True) + prop_assert(bad_lsig_res, bad_lsig_res.passed(), False) + + prop_assert(app_res, app_res.rejected(), False) + prop_assert(app_log_res, app_log_res.rejected(), False) + prop_assert(lsig_res, lsig_res.rejected(), False) + 
prop_assert(bad_lsig_res, bad_lsig_res.rejected(), True) + + prop_assert(app_res, app_res.error(), False) + prop_assert(app_log_res, app_log_res.error(), False) + prop_assert(lsig_res, lsig_res.error(), False) + prop_assert(bad_lsig_res, bad_lsig_res.error(), True) + assert bad_lsig_res.error( + contains="logic 0 failed at line 7: log not allowed in current mode" + ) + prop_assert(bad_lsig_res, bad_lsig_res.error(contains="log not allowed"), True) + prop_assert(bad_lsig_res, bad_lsig_res.error(contains="WRONG PATTERN"), False) + + prop_assert(app_res, app_res.error_message(), None) + prop_assert(app_log_res, app_log_res.error_message(), None) + prop_assert(lsig_res, lsig_res.error_message(), None) + assert ( + "logic 0 failed at line 7: log not allowed in current mode" + in bad_lsig_res.error_message() + ) + + +APP_SCENARIOS = { + "app_exp": { + "inputs": [()], + # since only a single input, just assert a constant in each case + "assertions": { + DRProp.cost: 11, + DRProp.lastLog: Encoder.hex(2**10), + # dicts have a special meaning as assertions. So in the case of "finalScratch" + # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around + DRProp.finalScratch: lambda _: {0: 2**10}, + DRProp.stackTop: 2**10, + DRProp.maxStackHeight: 2, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "app_square_byref": { + "inputs": [(i,) for i in range(100)], + "assertions": { + DRProp.cost: lambda _, actual: 20 < actual < 22, + DRProp.lastLog: Encoder.hex(1337), + # due to dry-run artifact of not reporting 0-valued scratchvars, + # we have a special case for n=0: + DRProp.finalScratch: lambda args, actual: ( + {2, 1337, (args[0] ** 2 if args[0] else 2)} + ).issubset(set(actual.values())), + DRProp.stackTop: 1337, + DRProp.maxStackHeight: 3, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "app_square": { + "inputs": [(i,) for i in range(100)], + "assertions": { + DRProp.cost: 14, + DRProp.lastLog: { + # since execution REJECTS for 0, expect last log for this case to be None + (i,): Encoder.hex(i * i) if i else None + for i in range(100) + }, + DRProp.finalScratch: lambda args: ( + {0: args[0], 1: args[0] ** 2} if args[0] else {} + ), + DRProp.stackTop: lambda args: args[0] ** 2, + DRProp.maxStackHeight: 2, + DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", + DRProp.passed: lambda i: i[0] > 0, + DRProp.rejected: lambda i: i[0] == 0, + DRProp.errorMessage: None, + }, + }, + "app_swap": { + "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], + "assertions": { + DRProp.cost: 27, + DRProp.lastLog: Encoder.hex(1337), + DRProp.finalScratch: lambda args: { + 0: 4, + 1: 5, + 2: Encoder.hex0x(args[0]), + 3: 1337, + 4: Encoder.hex0x(args[1]), + 5: Encoder.hex0x(args[0]), + }, + DRProp.stackTop: 1337, + DRProp.maxStackHeight: 2, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "app_string_mult": { + "inputs": [("xyzw", i) for i in range(100)], + "assertions": { + DRProp.cost: lambda args: 30 + 15 * args[1], + DRProp.lastLog: ( + lambda args: Encoder.hex(args[0] * args[1]) if args[1] else None + ), + # due to dryrun 0-scratchvar artifact, special case for i == 0: + DRProp.finalScratch: lambda args: ( + { + 0: 5, + 1: args[1], + 2: args[1] + 1, + 3: Encoder.hex0x(args[0]), + 4: Encoder.hex0x(args[0] * args[1]), + 5: Encoder.hex0x(args[0] * args[1]), + } + if args[1] + else { + 
0: 5, + 2: args[1] + 1, + 3: Encoder.hex0x(args[0]), + } + ), + DRProp.stackTop: lambda args: len(args[0] * args[1]), + DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, + DRProp.status: lambda args: ("PASS" if 0 < args[1] < 45 else "REJECT"), + DRProp.passed: lambda args: 0 < args[1] < 45, + DRProp.rejected: lambda args: 0 >= args[1] or args[1] >= 45, + DRProp.errorMessage: None, + }, + }, + "app_oldfac": { + "inputs": [(i,) for i in range(25)], + "assertions": { + DRProp.cost: lambda args, actual: ( + actual - 40 <= 17 * args[0] <= actual + 40 + ), + DRProp.lastLog: lambda args: ( + Encoder.hex(fac_with_overflow(args[0])) if args[0] < 21 else None + ), + DRProp.finalScratch: lambda args: ( + {0: args[0], 1: fac_with_overflow(args[0])} + if 0 < args[0] < 21 + else ( + {0: min(21, args[0])} + if args[0] + else {1: fac_with_overflow(args[0])} + ) + ), + DRProp.stackTop: lambda args: fac_with_overflow(args[0]), + DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), + DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", + DRProp.passed: lambda args: args[0] < 21, + DRProp.rejected: lambda args: args[0] >= 21, + DRProp.errorMessage: lambda args, actual: ( + actual is None if args[0] < 21 else "overflowed" in actual + ), + }, + }, + "app_slow_fibonacci": { + "inputs": [(i,) for i in range(18)], + "assertions": { + DRProp.cost: lambda args: (fib_cost(args) if args[0] < 17 else 70_000), + DRProp.lastLog: lambda args: ( + Encoder.hex(fib(args[0])) if 0 < args[0] < 17 else None + ), + DRProp.finalScratch: lambda args, actual: ( + actual == {0: args[0], 1: fib(args[0])} + if 0 < args[0] < 17 + else (True if args[0] >= 17 else actual == {}) + ), + # we declare to "not care" about the top of the stack for n >= 17 + DRProp.stackTop: lambda args, actual: ( + actual == fib(args[0]) if args[0] < 17 else True + ), + # similarly, we don't care about max stack height for n >= 17 + DRProp.maxStackHeight: lambda args, actual: ( + actual == max(2, 2 * args[0]) if args[0] < 17 else True + ), + DRProp.status: lambda args: "PASS" if 0 < args[0] < 8 else "REJECT", + DRProp.passed: lambda args: 0 < args[0] < 8, + DRProp.rejected: lambda args: 0 >= args[0] or args[0] >= 8, + DRProp.errorMessage: lambda args, actual: ( + actual is None + if args[0] < 17 + else "dynamic cost budget exceeded" in actual + ), + }, + }, +} + + +@pytest.mark.parametrize("filebase", APP_SCENARIOS.keys()) +def test_app_with_report(filebase: str): + mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase] + + # 0. Validate that the scenarios are well defined: + inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) + + algod = get_algod() + + # 1. Read the TEAL from ./tests/teal/*.teal + path = TESTS_DIR / "teal" + case_name = filebase + tealpath = path / f"{filebase}.teal" + with open(tealpath, "r") as f: + teal = f.read() + + print( + f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: +------- +{teal} +-------""" + ) + + # 2. Run the requests to obtain sequence of Dryrun responses: + dryrun_results = Executor.dryrun_app_on_sequence(algod, teal, inputs) + + # 3. Generate statistical report of all the runs: + csvpath = path / f"{filebase}.csv" + with open(csvpath, "w") as f: + f.write(DRR.csv_report(inputs, dryrun_results)) + + print(f"Saved Dry Run CSV report to {csvpath}") + + # 4. 
Sequential assertions (if provided any) + for i, type_n_assertion in enumerate(assertions.items()): + assert_type, assertion = type_n_assertion + + assert SequenceAssertion.mode_has_assertion( + mode, assert_type + ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE or MODIFY" + + assertion = SequenceAssertion( + assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" + ) + print( + f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" + ) + assertion.dryrun_assert(inputs, dryrun_results, assert_type) + + +# NOTE: logic sig dry runs are missing some information when compared with app dry runs. +# Therefore, certain assertions don't make sense for logic sigs explaining why some of the below are commented out: +LOGICSIG_SCENARIOS = { + "lsig_exp": { + "inputs": [()], + "assertions": { + # DRA.cost: 11, + # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), + DRProp.finalScratch: lambda _: {}, + DRProp.stackTop: 2**10, + DRProp.maxStackHeight: 2, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "lsig_square_byref": { + "inputs": [(i,) for i in range(100)], + "assertions": { + # DRA.cost: lambda _, actual: 20 < actual < 22, + # DRA.lastLog: lightly_encode_output(1337, logs=True), + # due to dry-run artifact of not reporting 0-valued scratchvars, + # we have a special case for n=0: + DRProp.finalScratch: lambda args: ({0: args[0] ** 2} if args[0] else {}), + DRProp.stackTop: 1337, + DRProp.maxStackHeight: 3, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "lsig_square": { + "inputs": [(i,) for i in range(100)], + "assertions": { + # DRA.cost: 14, + # DRA.lastLog: {(i,): lightly_encode_output(i * i, logs=True) if i else None for i in range(100)}, + DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), + DRProp.stackTop: lambda args: args[0] ** 2, + DRProp.maxStackHeight: 2, + DRProp.status: lambda i: "PASS" if i[0] > 0 else "REJECT", + DRProp.passed: lambda i: i[0] > 0, + DRProp.rejected: lambda i: i[0] == 0, + DRProp.errorMessage: None, + }, + }, + "lsig_swap": { + "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], + "assertions": { + # DRA.cost: 27, + # DRA.lastLog: lightly_encode_output(1337, logs=True), + DRProp.finalScratch: lambda args: { + 0: Encoder.hex0x(args[1]), + 1: Encoder.hex0x(args[0]), + 3: 1, + 4: Encoder.hex0x(args[0]), + }, + DRProp.stackTop: 1337, + DRProp.maxStackHeight: 2, + DRProp.status: "PASS", + DRProp.passed: True, + DRProp.rejected: False, + DRProp.errorMessage: None, + }, + }, + "lsig_string_mult": { + "inputs": [("xyzw", i) for i in range(100)], + "assertions": { + # DRA.cost: lambda args: 30 + 15 * args[1], + # DRA.lastLog: lambda args: lightly_encode_output(args[0] * args[1]) if args[1] else None, + DRProp.finalScratch: lambda args: ( + { + 0: Encoder.hex0x(args[0] * args[1]), + 2: args[1], + 3: args[1] + 1, + 4: Encoder.hex0x(args[0]), + } + if args[1] + else { + 3: args[1] + 1, + 4: Encoder.hex0x(args[0]), + } + ), + DRProp.stackTop: lambda args: len(args[0] * args[1]), + DRProp.maxStackHeight: lambda args: 3 if args[1] else 2, + DRProp.status: lambda args: "PASS" if args[1] else "REJECT", + DRProp.passed: lambda args: bool(args[1]), + DRProp.rejected: lambda args: not bool(args[1]), + DRProp.errorMessage: None, + }, + }, + "lsig_oldfac": { + "inputs": [(i,) for i in range(25)], + "assertions": { + # DRA.cost: lambda args, actual: actual - 40 <= 17 * 
args[0] <= actual + 40, + # DRA.lastLog: lambda args, actual: (actual is None) or (int(actual, base=16) == fac_with_overflow(args[0])), + DRProp.finalScratch: lambda args: ( + {0: min(args[0], 21)} if args[0] else {} + ), + DRProp.stackTop: lambda args: fac_with_overflow(args[0]), + DRProp.maxStackHeight: lambda args: max(2, 2 * args[0]), + DRProp.status: lambda args: "PASS" if args[0] < 21 else "REJECT", + DRProp.passed: lambda args: args[0] < 21, + DRProp.rejected: lambda args: args[0] >= 21, + DRProp.errorMessage: lambda args, actual: ( + actual is None + if args[0] < 21 + else "logic 0 failed at line 21: * overflowed" in actual + ), + }, + }, + "lsig_slow_fibonacci": { + "inputs": [(i,) for i in range(18)], + "assertions": { + # DRA.cost: fib_cost, + # DRA.lastLog: fib_last_log, + # by returning True for n >= 15, we're declaring that we don't care about the scratchvar's for such cases: + DRProp.finalScratch: lambda args, actual: ( + actual == {0: args[0]} + if 0 < args[0] < 15 + else (True if args[0] else actual == {}) + ), + DRProp.stackTop: lambda args, actual: ( + actual == fib(args[0]) if args[0] < 15 else True + ), + DRProp.maxStackHeight: lambda args, actual: ( + actual == max(2, 2 * args[0]) if args[0] < 15 else True + ), + DRProp.status: lambda args: "PASS" if 0 < args[0] < 15 else "REJECT", + DRProp.passed: lambda args: 0 < args[0] < 15, + DRProp.rejected: lambda args: not (0 < args[0] < 15), + DRProp.errorMessage: lambda args, actual: ( + actual is None + if args[0] < 15 + else "dynamic cost budget exceeded" in actual + ), + }, + }, +} + + +@pytest.mark.parametrize("filebase", LOGICSIG_SCENARIOS.keys()) +def test_logicsig_with_report(filebase: str): + mode, scenario = ExecutionMode.Signature, LOGICSIG_SCENARIOS[filebase] + + # 0. Validate that the scenarios are well defined: + inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) + + algod = get_algod() + + # 1. Read the TEAL from ./tests/teal/*.teal + path = TESTS_DIR / "teal" + case_name = filebase + tealpath = path / f"{filebase}.teal" + with open(tealpath, "r") as f: + teal = f.read() + + print( + f"""Sandbox test and report {mode} for {case_name} from {tealpath}. TEAL is: +------- +{teal} +-------""" + ) + + # 2. Run the requests to obtain sequence of Dryrun resonses: + dryrun_results = Executor.dryrun_logicsig_on_sequence(algod, teal, inputs) + + # 3. Generate statistical report of all the runs: + csvpath = path / f"{filebase}.csv" + with open(csvpath, "w") as f: + f.write(DRR.csv_report(inputs, dryrun_results)) + + print(f"Saved Dry Run CSV report to {csvpath}") + + # 4. Sequential assertions (if provided any) + for i, type_n_assertion in enumerate(assertions.items()): + assert_type, assertion = type_n_assertion + + assert SequenceAssertion.mode_has_assertion( + mode, assert_type + ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE of MODIFY" + + assertion = SequenceAssertion( + assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" + ) + print( + f"{i+1}. 
Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" + ) + assertion.dryrun_assert(inputs, dryrun_results, assert_type) From 9c41ee62588e0abc5d31c247be165fc0cca76b72 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 00:02:22 -0500 Subject: [PATCH 25/85] edits --- README.md | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index ce65696b..2d9d283d 100644 --- a/README.md +++ b/README.md @@ -8,7 +8,8 @@ ### What is TEAL Blackbox Testing? -TEAL Blackbox Testing lets you treat your TEAL program as a black box that for every received input produces an output and other observable effects. You can create reports that summarize those effects, and turn the _reports_ into _program invariant conjectures_ which you then check with _sequence assertions_. +TEAL Blackbox Testing lets you gain confidence that your Algorand smart contracts +are correct by writing assertions and and analyzing results via dry runs. ### Why Blackbox Testing? @@ -20,7 +21,7 @@ Here are some use cases: ## Simple TEAL Blackbox Toolkit Example: Program for $`x^2`$ -Consider the following [TEAL program](https://github.com/algorand/py-algorand-sdk/blob/23c21170cfb19652d5da854e499dca47eabb20e8/x/blackbox/teal/lsig_square.teal) that purportedly computes $`x^2`$: +Consider this [TEAL program](./tests/teal/lsig_square.teal) for computing $`x^2`$: ```plain #pragma version 6 @@ -38,7 +39,7 @@ exp retsub ``` -You'd like to write some unit tests to validate that it computes what you think it should, and also make **assertions** about the: +We'd like to write some unit tests to validate its correctness and make **assertions** about the: * program's opcode cost * program's stack @@ -48,11 +49,11 @@ You'd like to write some unit tests to validate that it computes what you think * status (**PASS**, **REJECT** or _erroring_) * error conditions that are and aren't encountered -Even better, before making fine-grained assertions you'd like to get a sense of what the program is doing on a large set of inputs so you can discover program invariants. The TEAL Blackbox Toolkit's recommended approach for enabling these goals is to: +Even better, before making fine-grained assertions we'd like to get a sense of what the program is doing on a large set of inputs and discover _experimentally_ these program invariants. 
Let's go through how we can do this: * start by making basic assertions and validate them using dry runs (see "**Basic Assertions**" section below) -* execute the program on a run-sequence of inputs and explore the results (see "**EDRA: Exploratory Dry Run Analysis**" section below) -* create invariants for the entire run-sequence and assert that the invariants hold (see "**Advanced: Asserting Invariants on a Dry Run Sequence**" section below) +* execute the program on a sequence of inputs and explore the results (see "**EDRA: Exploratory Dry Run Analysis**" section below) +* create invariants for the entire sequence and assert that the invariants hold (see "**Advanced: Asserting Invariants on a Dry Run Sequence**" section below) > Becoming a TEAL Blackbox Toolkit Ninja involves 10 steps as described below From b0489d52807b96477c180ec7b7ddbf045a316e02 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 00:35:26 -0500 Subject: [PATCH 26/85] DryRunInspector and other refactorings --- Makefile | 4 +-- README.md | 21 +++++++----- blackbox/blackbox.py | 26 +++++++-------- tests/integration/__init__.py | 0 .../blackbox_test.py} | 2 +- tests/integration/doc_examples_test.py | 33 +++++++++++++++++++ .../dryrun_mixin_docs_test.py | 0 tests/unit/__init__.py | 0 tests/{unit_test.py => unit/sanity_test.py} | 0 9 files changed, 60 insertions(+), 26 deletions(-) create mode 100644 tests/integration/__init__.py rename tests/{integration_test.py => integration/blackbox_test.py} (99%) create mode 100644 tests/integration/doc_examples_test.py rename tests/{ => integration}/dryrun_mixin_docs_test.py (100%) create mode 100644 tests/unit/__init__.py rename tests/{unit_test.py => unit/sanity_test.py} (100%) diff --git a/Makefile b/Makefile index 3ea96e13..a52d868f 100644 --- a/Makefile +++ b/Makefile @@ -9,7 +9,7 @@ pip-test: pip install . unit-test: - pytest tests/unit_test.py + pytest -sv tests/unit blackbox-smoke-prefix: echo "hello blackbox!" @@ -19,7 +19,7 @@ blackbox-smoke-prefix: cd sandbox && docker-compose ps integration-test: - pytest -sv tests/integration_test.py + pytest -sv tests/integration ###### Mac Only ###### diff --git a/README.md b/README.md index 2d9d283d..c8e163a4 100644 --- a/README.md +++ b/README.md @@ -61,13 +61,13 @@ Even better, before making fine-grained assertions we'd like to get a sense of w **STEP 1**. Start with a running local node and make note of Algod's port number (for our [standard sandbox](https://github.com/algorand/sandbox) this is `4001`) -**STEP 2**. Set the `ALGOD_PORT` value in [x/testnet.py](https://github.com/algorand/py-algorand-sdk/blob/5faf79ddb56327a0e036ff4e21a39b52535751ae/x/testnet.py#L6) to this port number. (The port is set to `60000` by default because [SDK-testing](https://github.com/algorand/algorand-sdk-testing) bootstraps with this setting on Circle and also to avoid conflicting locally with the typical sandbox setup) +**STEP 2**. Set the `ALGOD_PORT` value in [tests/clients.py](./tests/clients.py#L7) to this port number. (The port is already pre-set to `4001` because [graviton](https://github.com/algorand/graviton)'s CI process uses the standad sandbox) ### TEAL Program for Testing: Logic Sig v. App -**STEP 3**. Next, you'll need to figure out if your TEAL program should be a Logic Signature or an Application. Each of these program _modes_ has its merits, but I won't get into the pros/cons here. From a Blackbox Test's perspective, the main difference is how each receives its arguments from the program executor. 
Logic sigs rely on the [arg opcode](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#arg-n) while apps rely on [txna ApplicationArgs i](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#txna-f-i). In our $`x^2`$ **logic sig** example, you can see on [line 2](https://github.com/algorand/py-algorand-sdk/blob/23c21170cfb19652d5da854e499dca47eabb20e8/x/blackbox/teal/lsig_square.teal#L2) that the `arg` opcode is used. Because each argument opcode (`arg` versus `ApplicationArgs`) is exclusive to one mode, any program that takes input will execute succesfully in _one mode only_. +**STEP 3**. Next, you'll need to figure out if your TEAL program should be a Logic Signature or an Application. Each of these program _modes_ has its merits, but we won't get into the pros/cons here. From a Blackbox Test's perspective, the main difference is how external arguments are handled. Logic sigs rely on the [arg opcode](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#arg-n) while apps rely on [txna ApplicationArgs i](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#txna-f-i). In our $`x^2`$ **logic sig** example, you can see on [line 2](./tests/teal/lsig_square.teal#L2) that the `arg` opcode is used. Because each argument opcode (`arg` versus `ApplicationArgs`) is mode-exclusive, any program that takes input will execute succesfully in _one mode only_. -**STEP 4**. Write the TEAL program that you want to test. You can inline the test as described here or follow the approach of `x/blackbox/blackbox_test.py` and save under `x/blackbox/teal`. So following the inline +**STEP 4**. Write the TEAL program that you want to test. You can inline the test as described here or follow the approach of `./blackbox/blackbox_test.py` and save under `./blackbox/teal`. So following the inline appraoch we begin our TEAL Blackbox script with an inline teal source variable: ```python @@ -91,17 +91,20 @@ retsub""" The TEAL Blackbox Toolkit comes with the following utility classes: * `DryRunExecutor` - facility to execute dry run's on apps and logic sigs -* `DryRunTransactionResult` - class encapsulating a single app or logic sig dry run transaction and for making assertions about the dry run +* `DryRunInspector` - class encapsulating a single app or logic sig dry run transaction and for making assertions about the dry run * `SequenceAssertion` - class for asserting invariants about a _sequence_ of dry run executions in a declarative fashion ### Basic Assertions -When executing a dry run using `DryRunExecutor` you'll get back `DryRunTransactionResult` objects. Such objects have +When executing a dry run using `DryRunExecutor` you'll get back `DryRunInspector` objects. Such objects have **assertable properties** which can be used to validate the dry run. **STEP 4**. Back to our $`x^2`$ example, and assuming the `teal` variable is defined [as above](#teal). You can run the following: ```python +from blackbox.blackbox import DryRunExecutor +from tests.clients import get_algod + algod = get_algod() x = 9 args = (x,) @@ -120,11 +123,11 @@ Some available _assertable properties_ are: * `error()` * `max_stack_height()` -See the [DryRunTransactionResult class comment](https://github.com/algorand/py-algorand-sdk/blob/b2a3366b7bc976e0610429c186b7968a7f1bbc76/algosdk/testing/teal_blackbox.py#L371) for more assertable properties and details. 
+See the [DryRunInspector class comment](https://github.com/algorand/py-algorand-sdk/blob/b2a3366b7bc976e0610429c186b7968a7f1bbc76/algosdk/testing/teal_blackbox.py#L371) for more assertable properties and details. ### Printing out the TEAL Stack Trace for a Failing Assertion -**STEP 5**. The `DryRunTransactionResult`'s `report()` method lets you print out +**STEP 5**. The `DryRunInspector`'s `report()` method lets you print out a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails: ```python @@ -214,7 +217,7 @@ executions, and conjecture some program invariants. To aid in the investigation algod = get_algod() inputs = [(x,) for x in range(16)] dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) -csv = DryRunTransactionResult.csv_report(inputs, dryrun_results) +csv = DryRunInspector.csv_report(inputs, dryrun_results) print(csv) ``` @@ -330,7 +333,7 @@ mode = ExecutionMode.Signature # Validate the scenario and dig out inputs/assertions: inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) -# Execute the dry runs and obtain sequence of DryRunTransactionResults: +# Execute the dry runs and obtain sequence of DryRunInspectors: dryrun_results = Executor.dryrun_logicsig_on_sequence(algod, teal, inputs) # Sequence assertions: diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index 027c606b..ce199f96 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -279,7 +279,7 @@ def dryrun_app( teal: str, args: Iterable[Union[str, int]], sender: str = ZERO_ADDRESS, - ) -> "DryRunTransactionResult": + ) -> "DryRunInspector": return cls.execute_one_dryrun( algod, teal, args, ExecutionMode.Application, sender=sender ) @@ -291,7 +291,7 @@ def dryrun_logicsig( teal: str, args: Iterable[Union[str, int]], sender: str = ZERO_ADDRESS, - ) -> "DryRunTransactionResult": + ) -> "DryRunInspector": return cls.execute_one_dryrun( algod, teal, args, ExecutionMode.Signature, sender ) @@ -303,7 +303,7 @@ def dryrun_app_on_sequence( teal: str, inputs: List[Iterable[Union[str, int]]], sender: str = ZERO_ADDRESS, - ) -> List["DryRunTransactionResult"]: + ) -> List["DryRunInspector"]: return cls._map(cls.dryrun_app, algod, teal, inputs, sender) @classmethod @@ -313,7 +313,7 @@ def dryrun_logicsig_on_sequence( teal: str, inputs: List[Iterable[Union[str, int]]], sender: str = ZERO_ADDRESS, - ) -> List["DryRunTransactionResult"]: + ) -> List["DryRunInspector"]: return cls._map(cls.dryrun_logicsig, algod, teal, inputs, sender) @classmethod @@ -328,7 +328,7 @@ def execute_one_dryrun( args: Iterable[Union[str, int]], mode: ExecutionMode, sender: str = ZERO_ADDRESS, - ) -> "DryRunTransactionResult": + ) -> "DryRunInspector": assert ( len(ExecutionMode) == 2 ), f"assuming only 2 ExecutionMode's but have {len(ExecutionMode)}" @@ -343,10 +343,10 @@ def execute_one_dryrun( ) dryrun_req = builder(teal, args, sender=sender) dryrun_resp = algod.dryrun(dryrun_req) - return DryRunTransactionResult.from_single_response(dryrun_resp) + return DryRunInspector.from_single_response(dryrun_resp) -class DryRunTransactionResult: +class DryRunInspector: """Methods to extract information from a single dry run transaction. TODO: merge this with @barnjamin's similarly named class of PR #283 @@ -370,7 +370,7 @@ class DryRunTransactionResult: status "PASS" and that the top of the stack contained $`x^2 = 9`$. 
The _assertable properties_ were `status()` and `stack_top()`. - DryRunTransactionResult provides the following **assertable properties**: + DryRunInspector provides the following **assertable properties**: * `cost` - total opcode cost utilized during execution - only available for apps @@ -435,7 +435,7 @@ def get_txn_mode(cls, txn: dict) -> ExecutionMode: return ExecutionMode.Signature @classmethod - def from_single_response(cls, dryrun_resp: dict) -> "DryRunTransactionResult": + def from_single_response(cls, dryrun_resp: dict) -> "DryRunInspector": txns = dryrun_resp.get("txns") or [] assert ( len(txns) == 1 @@ -719,9 +719,7 @@ def csv_row( } @classmethod - def csv_report( - cls, inputs: List[tuple], dr_resps: List["DryRunTransactionResult"] - ) -> str: + def csv_report(cls, inputs: List[tuple], dr_resps: List["DryRunInspector"]) -> str: """Produce a Comma Separated Values report string capturing important statistics for a sequence of dry runs. @@ -733,7 +731,7 @@ def csv_report( >>> algod = get_algod() >>> inputs = [(x,) for x in range(11)] # [(0), (1), ... , (10)] >>> dryrun_results = DryRunExecutor.dryrun_app_on_sequence(algod, teal, inputs) - >>> csv = DryRunTransactionResult.csv_report(inputs, dryrun_results) + >>> csv = DryRunInspector.csv_report(inputs, dryrun_results) >>> print(csv) ``` Then you would get the following output: @@ -850,7 +848,7 @@ def expected(self, args: list) -> Union[str, int]: def dryrun_assert( self, inputs: List[list], - dryrun_results: List["DryRunTransactionResult"], + dryrun_results: List["DryRunInspector"], assert_type: DryRunProperty, ): N = len(inputs) diff --git a/tests/integration/__init__.py b/tests/integration/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/integration_test.py b/tests/integration/blackbox_test.py similarity index 99% rename from tests/integration_test.py rename to tests/integration/blackbox_test.py index 9f1ed6a9..14dc1732 100644 --- a/tests/integration_test.py +++ b/tests/integration/blackbox_test.py @@ -6,7 +6,7 @@ DryRunEncoder as Encoder, DryRunExecutor as Executor, DryRunProperty as DRProp, - DryRunTransactionResult as DRR, + DryRunInspector as DRR, ExecutionMode, SequenceAssertion, ) diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py new file mode 100644 index 00000000..23e07d84 --- /dev/null +++ b/tests/integration/doc_examples_test.py @@ -0,0 +1,33 @@ +teal = """#pragma version 6 +arg 0 +btoi +callsub square_0 +return + +// square +square_0: +store 0 +load 0 +pushint 2 // 2 +exp +retsub""" + + +def test_step4(): + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod + + algod = get_algod() + x = 9 + args = (x,) + dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args) + assert dryrun_result.status() == "PASS" + assert dryrun_result.stack_top() == x**2 + + print(dryrun_result.stack_top()) + print(dryrun_result.last_log()) + print(dryrun_result.cost()) + print(dryrun_result.status()) + print(dryrun_result.final_scratch()) + print(dryrun_result.error()) + print(dryrun_result.max_stack_height()) diff --git a/tests/dryrun_mixin_docs_test.py b/tests/integration/dryrun_mixin_docs_test.py similarity index 100% rename from tests/dryrun_mixin_docs_test.py rename to tests/integration/dryrun_mixin_docs_test.py diff --git a/tests/unit/__init__.py b/tests/unit/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/unit_test.py b/tests/unit/sanity_test.py similarity index 100% rename from tests/unit_test.py 
rename to tests/unit/sanity_test.py From dd97d258f8978b06663404a37947d66fdd679934 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 01:29:11 -0500 Subject: [PATCH 27/85] edits and proofs that examples are working --- README.md | 142 ++++++++++++++----------- tests/integration/doc_examples_test.py | 55 ++++++++-- 2 files changed, 124 insertions(+), 73 deletions(-) diff --git a/README.md b/README.md index c8e163a4..a4c40878 100644 --- a/README.md +++ b/README.md @@ -105,13 +105,21 @@ When executing a dry run using `DryRunExecutor` you'll get back `DryRunInspecto from blackbox.blackbox import DryRunExecutor from tests.clients import get_algod -algod = get_algod() -x = 9 -args = (x,) -dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args) -assert dryrun_result.status() == "PASS" -assert dryrun_result.stack_top() == x ** 2 -``` + algod = get_algod() + x = 9 + args = (x,) + inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) + assert inspector.status() == "PASS" + assert inspector.stack_top() == x**2 + + print(inspector.stack_top()) + print(inspector.last_log()) + print(inspector.cost()) + print(inspector.status()) + print(inspector.final_scratch()) + print(inspector.error()) + print(inspector.max_stack_height()) + ``` Some available _assertable properties_ are: @@ -123,7 +131,7 @@ Some available _assertable properties_ are: * `error()` * `max_stack_height()` -See the [DryRunInspector class comment](https://github.com/algorand/py-algorand-sdk/blob/b2a3366b7bc976e0610429c186b7968a7f1bbc76/algosdk/testing/teal_blackbox.py#L371) for more assertable properties and details. +See the [DryRunInspector class comment](./blackbox/blackbox.py#L373) for more assertable properties and details. ### Printing out the TEAL Stack Trace for a Failing Assertion @@ -131,67 +139,75 @@ See the [DryRunInspector class comment](https://github.com/algorand/py-algorand- a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails: ```python +from blackbox.blackbox import DryRunExecutor +from tests.clients import get_algod + algod = get_algod() x = 2 args = (x,) -dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args) +inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) # This one's ok -expected, actual = "PASS", dryrun_result.status() -assert expected == actual, dryrun_result.report(args, f"expected {expected} but got {actual}") +expected, actual = "PASS", inspector.status() +assert expected == actual, inspector.report( + args, f"expected {expected} but got {actual}" +) # This one's absurd! 
x^3 != x^2 -expected, actual = x ** 3, dryrun_result.stack_stop() -assert expected == actual, dryrun_result.report(args, f"expected {expected} but got {actual}") +expected, actual = x**3, inspector.stack_top() +assert expected == actual, inspector.report( + args, f"expected {expected} but got {actual}" +) ``` If we run the test we'll get the following printout (this is for pytest, but other testing frameworks should be similar): ```sh -E AssertionError: =============== -E <<<<<<<<<<>>>>>>>>>>>> -E =============== -E App Trace: -E step | PC# | L# | Teal | Scratch | Stack -E --------+-------+------+-------------------+-----------+---------------------- -E 1 | 1 | 1 | #pragma version 6 | | [] -E 2 | 2 | 2 | arg_0 | | [0x0000000000000002] -E 3 | 3 | 3 | btoi | | [2] -E 4 | 7 | 6 | label1: | | [2] -E 5 | 9 | 7 | store 0 | 0->2 | [] -E 6 | 11 | 8 | load 0 | | [2] -E 7 | 13 | 9 | pushint 2 | | [2, 2] -E 8 | 14 | 10 | exp | | [4] -E 9 | 6 | 4 | callsub label1 | | [4] -E 10 | 15 | 11 | retsub | | [4] -E =============== -E MODE: Mode.Signature -E TOTAL COST: None -E =============== -E FINAL MESSAGE: PASS -E =============== -E Messages: ['PASS'] -E Logs: [] -E =============== -E -----BlackBoxResult(steps_executed=10)----- -E TOTAL STEPS: 10 -E FINAL STACK: [4] -E FINAL STACK TOP: 4 -E MAX STACK HEIGHT: 2 -E FINAL SCRATCH: {0: 2} -E SLOTS USED: [0] -E FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2} -E =============== -E Global Delta: -E [] -E =============== -E Local Delta: -E [] -E =============== -E TXN AS ROW: {' Run': 0, ' cost': None, ' final_log': None, ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2} -E =============== -E <<<<<<<<<<>>>>>>>>>>>> -E =============== +E AssertionError: =============== +E <<<<<<<<<<>>>>>>>>>> +E =============== +E App Trace: +E step | PC# | L# | Teal | Scratch | Stack +E --------+-------+------+-------------------+-----------+---------------------- +E 1 | 1 | 1 | #pragma version 6 | | [] +E 2 | 2 | 2 | arg_0 | | [0x0000000000000002] +E 3 | 3 | 3 | btoi | | [2] +E 4 | 7 | 6 | label1: | | [2] +E 5 | 9 | 7 | store 0 | 0->2 | [] +E 6 | 11 | 8 | load 0 | | [2] +E 7 | 13 | 9 | pushint 2 | | [2, 2] +E 8 | 14 | 10 | exp | | [4] +E 9 | 6 | 4 | callsub label1 | | [4] +E 10 | 15 | 11 | retsub | | [4] +E =============== +E MODE: ExecutionMode.Signature +E TOTAL COST: None +E =============== +E FINAL MESSAGE: PASS +E =============== +E Messages: ['PASS'] +E Logs: [] +E =============== +E -----BlackBoxResult(steps_executed=10)----- +E TOTAL STEPS: 10 +E FINAL STACK: [4] +E FINAL STACK TOP: 4 +E MAX STACK HEIGHT: 2 +E FINAL SCRATCH: {0: 2} +E SLOTS USED: [0] +E FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2} +E =============== +E Global Delta: +E [] +E =============== +E Local Delta: +E [] +E =============== +E TXN AS ROW: {' Run': 0, ' cost': None, ' last_log': '`None', ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2} +E =============== +E <<<<<<<<<<>>>>>>>>>> +E =============== +E E assert 8 == 4 ``` @@ -201,12 +217,11 @@ In particular, we can: * 2 was assigned to **scratch slot #0** at step 5 * the stack ended up with **4** on top * the run **PASS**'ed -* Read the message parameter that was provided and which explains in English what went wrong: `<<<<<<<<<<>>>>>>>>>>>>` +* Read the message parameter that was provided and which explains in English what went 
wrong: `expected 8 but got 4` ### EDRA: Exploratory Dry Run Analysis -Let's expand our investigation from a single dry-run to multiple runs or a **run sequence**. In other words, given a sequence of inputs, observe _assertable properties_ for the corresponding -executions, and conjecture some program invariants. To aid in the investigation we'll generate a report in CSV format (Comma Separated Values) where: +Let's expand our investigation from a single dry-run to multiple runs or a **run sequence**. We'll observe how _assertable properties_ depend on inputs and conjecture some program invariants. To aid in the investigation we'll generate a report in CSV format (Comma Separated Values) where: * columns represent _assertable properties_ of dry-runs, and * rows represents dry-run executions for specific inputs @@ -214,10 +229,13 @@ executions, and conjecture some program invariants. To aid in the investigation **STEP 6**. Back to our $`x^2`$ example, here's how to generate a report with 1 row for each of the inputs `0, 1, ... , 15`: ```python +from blackbox.blackbox import DryRunExecutor, DryRunInspector +from tests.clients import get_algod + algod = get_algod() inputs = [(x,) for x in range(16)] -dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) -csv = DryRunInspector.csv_report(inputs, dryrun_results) +run_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) +csv = DryRunInspector.csv_report(inputs, run_results) print(csv) ``` diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 23e07d84..f6aac263 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -20,14 +20,47 @@ def test_step4(): algod = get_algod() x = 9 args = (x,) - dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args) - assert dryrun_result.status() == "PASS" - assert dryrun_result.stack_top() == x**2 - - print(dryrun_result.stack_top()) - print(dryrun_result.last_log()) - print(dryrun_result.cost()) - print(dryrun_result.status()) - print(dryrun_result.final_scratch()) - print(dryrun_result.error()) - print(dryrun_result.max_stack_height()) + inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) + assert inspector.status() == "PASS" + assert inspector.stack_top() == x**2 + + print(inspector.stack_top()) + print(inspector.last_log()) + print(inspector.cost()) + print(inspector.status()) + print(inspector.final_scratch()) + print(inspector.error()) + print(inspector.max_stack_height()) + + +def test_step5(): + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod + + algod = get_algod() + x = 2 + args = (x,) + inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) + + # This one's ok + expected, actual = "PASS", inspector.status() + assert expected == actual, inspector.report( + args, f"expected {expected} but got {actual}" + ) + + # This one's absurd! 
x^3 != x^2 + expected, actual = x**3, inspector.stack_top() + assert expected == actual, inspector.report( + args, f"expected {expected} but got {actual}" + ) + + +def test_step6(): + from blackbox.blackbox import DryRunExecutor, DryRunInspector + from tests.clients import get_algod + + algod = get_algod() + inputs = [(x,) for x in range(16)] + run_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) + csv = DryRunInspector.csv_report(inputs, run_results) + print(csv) From b44d75cf03259644171a41f00bae1ed8ae59ddf6 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 08:04:23 -0500 Subject: [PATCH 28/85] pass step 5 of docs test --- README.md | 94 +++++++++++++------------- tests/integration/doc_examples_test.py | 65 +++++++++++++++++- 2 files changed, 109 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index a4c40878..d1ff4a45 100644 --- a/README.md +++ b/README.md @@ -160,55 +160,55 @@ assert expected == actual, inspector.report( ) ``` -If we run the test we'll get the following printout (this is for pytest, but other testing frameworks should be similar): +If we run the test we'll a printout such as: ```sh -E AssertionError: =============== -E <<<<<<<<<<>>>>>>>>>> -E =============== -E App Trace: -E step | PC# | L# | Teal | Scratch | Stack -E --------+-------+------+-------------------+-----------+---------------------- -E 1 | 1 | 1 | #pragma version 6 | | [] -E 2 | 2 | 2 | arg_0 | | [0x0000000000000002] -E 3 | 3 | 3 | btoi | | [2] -E 4 | 7 | 6 | label1: | | [2] -E 5 | 9 | 7 | store 0 | 0->2 | [] -E 6 | 11 | 8 | load 0 | | [2] -E 7 | 13 | 9 | pushint 2 | | [2, 2] -E 8 | 14 | 10 | exp | | [4] -E 9 | 6 | 4 | callsub label1 | | [4] -E 10 | 15 | 11 | retsub | | [4] -E =============== -E MODE: ExecutionMode.Signature -E TOTAL COST: None -E =============== -E FINAL MESSAGE: PASS -E =============== -E Messages: ['PASS'] -E Logs: [] -E =============== -E -----BlackBoxResult(steps_executed=10)----- -E TOTAL STEPS: 10 -E FINAL STACK: [4] -E FINAL STACK TOP: 4 -E MAX STACK HEIGHT: 2 -E FINAL SCRATCH: {0: 2} -E SLOTS USED: [0] -E FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2} -E =============== -E Global Delta: -E [] -E =============== -E Local Delta: -E [] -E =============== -E TXN AS ROW: {' Run': 0, ' cost': None, ' last_log': '`None', ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2} -E =============== -E <<<<<<<<<<>>>>>>>>>> -E =============== -E -E assert 8 == 4 +AssertionError: +=============== +<<<<<<<<<<>>>>>>>>>> +=============== + App Trace: + step | PC# | L# | Teal | Scratch | Stack +--------+-------+------+-------------------+-----------+---------------------- + 1 | 1 | 1 | #pragma version 6 | | [] + 2 | 2 | 2 | arg_0 | | [0x0000000000000002] + 3 | 3 | 3 | btoi | | [2] + 4 | 7 | 6 | label1: | | [2] + 5 | 9 | 7 | store 0 | 0->2 | [] + 6 | 11 | 8 | load 0 | | [2] + 7 | 13 | 9 | pushint 2 | | [2, 2] + 8 | 14 | 10 | exp | | [4] + 9 | 6 | 4 | callsub label1 | | [4] + 10 | 15 | 11 | retsub | | [4] +=============== +MODE: ExecutionMode.Signature +TOTAL COST: None +=============== +FINAL MESSAGE: PASS +=============== +Messages: ['PASS'] +Logs: [] +=============== +-----BlackBoxResult(steps_executed=10)----- +TOTAL STEPS: 10 +FINAL STACK: [4] +FINAL STACK TOP: 4 +MAX STACK HEIGHT: 2 +FINAL SCRATCH: {0: 2} +SLOTS USED: [0] +FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2} +=============== +Global 
Delta: +[] +=============== +Local Delta: +[] +=============== +TXN AS ROW: {' Run': 0, ' cost': None, ' last_log': '`None', ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2} +=============== +<<<<<<<<<<>>>>>>>>>> +=============== +assert 8 == 4 ``` In particular, we can: diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index f6aac263..57c97e9b 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -1,3 +1,7 @@ +import pytest +import re + + teal = """#pragma version 6 arg 0 btoi @@ -50,9 +54,64 @@ def test_step5(): # This one's absurd! x^3 != x^2 expected, actual = x**3, inspector.stack_top() - assert expected == actual, inspector.report( - args, f"expected {expected} but got {actual}" - ) + + # wrap for test purposes only + with pytest.raises(AssertionError) as ae: + assert expected == actual, inspector.report( + args, f"expected {expected} but got {actual}" + ) + expected = """AssertionError: +=============== +<<<<<<<<<<>>>>>>>>>> +=============== + App Trace: + step | PC# | L# | Teal | Scratch | Stack +--------+-------+------+-------------------+-----------+---------------------- + 1 | 1 | 1 | #pragma version 6 | | [] + 2 | 2 | 2 | arg_0 | | [0x0000000000000002] + 3 | 3 | 3 | btoi | | [2] + 4 | 7 | 6 | label1: | | [2] + 5 | 9 | 7 | store 0 | 0->2 | [] + 6 | 11 | 8 | load 0 | | [2] + 7 | 13 | 9 | pushint 2 | | [2, 2] + 8 | 14 | 10 | exp | | [4] + 9 | 6 | 4 | callsub label1 | | [4] + 10 | 15 | 11 | retsub | | [4] +=============== +MODE: ExecutionMode.Signature +TOTAL COST: None +=============== +FINAL MESSAGE: PASS +=============== +Messages: ['PASS'] +Logs: [] +=============== +-----BlackBoxResult(steps_executed=10)----- +TOTAL STEPS: 10 +FINAL STACK: [4] +FINAL STACK TOP: 4 +MAX STACK HEIGHT: 2 +FINAL SCRATCH: {0: 2} +SLOTS USED: [0] +FINAL AS ROW: {'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2} +=============== +Global Delta: +[] +=============== +Local Delta: +[] +=============== +TXN AS ROW: {' Run': 0, ' cost': None, ' last_log': '`None', ' final_message': 'PASS', ' Status': 'PASS', 'steps': 10, ' top_of_stack': 4, 'max_stack_height': 2, 's@000': 2, 'Arg_00': 2} +=============== +<<<<<<<<<<>>>>>>>>>> +=============== +assert 8 == 4 +""" + + def remove_whitespace(s): + return re.sub(r"\s+", "", s) + + assert remove_whitespace(expected) == remove_whitespace(ae.exconly()) def test_step6(): From 083c92f57cc8a2dc5f6753479571ff4bacf6853a Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 08:25:39 -0500 Subject: [PATCH 29/85] break higher-level Invariant into its own file --- README.md | 2 +- blackbox/blackbox.py | 192 ++++------------------------- blackbox/invariant.py | 150 ++++++++++++++++++++++ tests/integration/blackbox_test.py | 19 ++- 4 files changed, 183 insertions(+), 180 deletions(-) create mode 100644 blackbox/invariant.py diff --git a/README.md b/README.md index d1ff4a45..da8fd907 100644 --- a/README.md +++ b/README.md @@ -239,7 +239,7 @@ csv = DryRunInspector.csv_report(inputs, run_results) print(csv) ``` -Note: that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be `Iterable` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). 
+Note: that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). At this point, you'll be able to look at your [dry run sequence results](https://github.com/algorand/py-algorand-sdk/blob/1bc7b8fcf21401608cece65507c36d1f6dbad531/algosdk/testing/teal_blackbox.py#L713) and conduct some analysis. For the $`x^2`$ example if you load the CSV in Google sheets and reformat a bit it will look like: image diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index ce199f96..b1df5209 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -3,9 +3,8 @@ from dataclasses import dataclass from enum import Enum, auto import io -from inspect import signature from tabulate import tabulate -from typing import Any, Callable, Dict, Iterable, List, Optional, Tuple, Union +from typing import Any, Dict, Sequence, List, Optional, Union from algosdk.v2client.algod import AlgodClient @@ -37,6 +36,20 @@ class DryRunProperty(Enum): localStateHas = auto() +def mode_has_property(mode: ExecutionMode, assertion_type: "DryRunProperty") -> bool: + missing = { + ExecutionMode.Signature: { + DryRunProperty.cost, + DryRunProperty.lastLog, + }, + ExecutionMode.Application: set(), + } + if assertion_type in missing[mode]: + return False + + return True + + DRProp = DryRunProperty @@ -223,7 +236,7 @@ class DryRunEncoder: """Encoding utilities for dry run executions and results""" @classmethod - def encode_args(cls, args: Iterable[Union[str, int]]) -> List[str]: + def encode_args(cls, args: Sequence[Union[str, int]]) -> List[str]: """ Encoding convention for Black Box Testing. @@ -277,7 +290,7 @@ def dryrun_app( cls, algod: AlgodClient, teal: str, - args: Iterable[Union[str, int]], + args: Sequence[Union[str, int]], sender: str = ZERO_ADDRESS, ) -> "DryRunInspector": return cls.execute_one_dryrun( @@ -289,7 +302,7 @@ def dryrun_logicsig( cls, algod: AlgodClient, teal: str, - args: Iterable[Union[str, int]], + args: Sequence[Union[str, int]], sender: str = ZERO_ADDRESS, ) -> "DryRunInspector": return cls.execute_one_dryrun( @@ -301,7 +314,7 @@ def dryrun_app_on_sequence( cls, algod: AlgodClient, teal: str, - inputs: List[Iterable[Union[str, int]]], + inputs: List[Sequence[Union[str, int]]], sender: str = ZERO_ADDRESS, ) -> List["DryRunInspector"]: return cls._map(cls.dryrun_app, algod, teal, inputs, sender) @@ -311,7 +324,7 @@ def dryrun_logicsig_on_sequence( cls, algod: AlgodClient, teal: str, - inputs: List[Iterable[Union[str, int]]], + inputs: List[Sequence[Union[str, int]]], sender: str = ZERO_ADDRESS, ) -> List["DryRunInspector"]: return cls._map(cls.dryrun_logicsig, algod, teal, inputs, sender) @@ -325,7 +338,7 @@ def execute_one_dryrun( cls, algod: AlgodClient, teal: str, - args: Iterable[Union[str, int]], + args: Sequence[Union[str, int]], mode: ExecutionMode, sender: str = ZERO_ADDRESS, ) -> "DryRunInspector": @@ -448,7 +461,7 @@ def dig(self, property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: txn = self.txn bbr = self.black_box_results - assert SequenceAssertion.mode_has_assertion( + assert mode_has_property( self.mode, property ), f"{self.mode} cannot handle dig information from txn for assertion type {property}" @@ -667,7 +680,7 @@ def empty_hack(se): table = tabulate(rows, headers=headers, tablefmt="presto") return table - def report(self, args: Iterable[Union[str, int]], msg: str, row: int = 0) -> str: + def 
report(self, args: Sequence[Union[str, int]], msg: str, row: int = 0) -> str: bbr = self.black_box_results return f"""=============== <<<<<<<<<<<{msg}>>>>>>>>>>> @@ -705,7 +718,7 @@ def report(self, args: Iterable[Union[str, int]], msg: str, row: int = 0) -> str """ def csv_row( - self, row_num: int, args: Iterable[Union[int, str]] + self, row_num: int, args: Sequence[Union[int, str]] ) -> Dict[str, Union[str, int]]: return { " Run": row_num, @@ -813,160 +826,3 @@ def extract_all(cls, txn: dict, is_app: bool) -> dict: result["bbr"] = BlackboxResults.scrape(result["trace"], result["lines"]) return result - - -class SequenceAssertion: - """Enable asserting invariants on a sequence of dry run executions""" - - def __init__( - self, - predicate: Union[Dict[Tuple, Union[str, int]], Callable], - enforce: bool = False, - name: str = None, - ): - self.definition = predicate - self.predicate, self._expected = self.prepare_predicate(predicate) - self.enforce = enforce - self.name = name - - def __repr__(self): - return f"SequenceAssertion({self.definition})"[:100] - - def __call__(self, args: list, actual: Union[str, int]) -> Tuple[bool, str]: - assertion = self.predicate(args, actual) - msg = "" - if not assertion: - msg = f"SequenceAssertion for '{self.name}' failed for for args {args}: actual is [{actual}] BUT expected [{self.expected(args)}]" - if self.enforce: - assert assertion, msg - - return assertion, msg - - def expected(self, args: list) -> Union[str, int]: - return self._expected(args) - - def dryrun_assert( - self, - inputs: List[list], - dryrun_results: List["DryRunInspector"], - assert_type: DryRunProperty, - ): - N = len(inputs) - assert N == len( - dryrun_results - ), f"inputs (len={N}) and dryrun responses (len={len(dryrun_results)}) must have the same length" - - assert isinstance( - assert_type, DryRunProperty - ), f"assertions types must be DryRunAssertionType's but got [{assert_type}] which is a {type(assert_type)}" - - for i, args in enumerate(inputs): - res = dryrun_results[i] - actual = res.dig(assert_type) - ok, msg = self(args, actual) - assert ok, res.report(args, msg, row=i + 1) - - @classmethod - def prepare_predicate(cls, predicate): - if isinstance(predicate, dict): - return ( - lambda args, actual: predicate[args] == actual, - lambda args: predicate[args], - ) - - if not isinstance(predicate, Callable): - # constant function in this case: - return lambda _, actual: predicate == actual, lambda _: predicate - - try: - sig = signature(predicate) - except Exception as e: - raise Exception( - f"callable predicate {predicate} must have a signature" - ) from e - - N = len(sig.parameters) - assert N in (1, 2), f"predicate has the wrong number of paramters {N}" - - if N == 2: - return predicate, lambda _: predicate - - # N == 1: - return lambda args, actual: predicate(args) == actual, lambda args: predicate( - args - ) - - @classmethod - def mode_has_assertion( - cls, mode: ExecutionMode, assertion_type: DryRunProperty - ) -> bool: - missing = { - ExecutionMode.Signature: { - DryRunProperty.cost, - DryRunProperty.lastLog, - }, - ExecutionMode.Application: set(), - } - if assertion_type in missing[mode]: - return False - - return True - - @classmethod - def inputs_and_assertions( - cls, scenario: Dict[str, Union[list, dict]], mode: ExecutionMode - ) -> Tuple[List[tuple], Dict[DRProp, Any]]: - """ - Validate that a Blackbox Test Scenario has been properly constructed, and return back - its components which consist of **inputs** and _optional_ **assertions**. 
- - A scenario should adhere to the following schema: - ``` - { - "inputs": List[Tuple[Union[str, int], ...]], - "assertions": Dict[DryRunAssertionType, ...an assertion...] - } - - Each assertion is a map from _assertion type_ to be made on a dry run, - to the actual assertion. Actual assertions can be: - * simple python types - these are useful in the case of _constant_ assertions. - For example, if you want to assert that the `maxStackHeight` is 3, just use `3`. - * dictionaries of type Dict[Tuple, Any] - these are useful when you just want to assert - a discrete set of input-output pairs. - For example, if you have 4 inputs that you want to assert are being squared, - you could use `{(2,): 4, (7,): 49, (13,): 169, (11,): 121}` - * functions which take a single variable. These are useful when you have a python "simulator" - for the assertions. - In the square example you could use `lambda args: args[0]**2` - * functions which take _two_ variables. These are useful when your assertion is more - subtle that out-and-out equality. For example, suppose you want to assert that the - `cost` of the dry run is `2*n` plus/minus 5 where `n` is the first arg of the input. Then - you could use `lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` - ``` - """ - assert isinstance( - scenario, dict - ), f"a Blackbox Scenario should be a dict but got a {type(scenario)}" - - inputs = scenario.get("inputs") - # TODO: we can be more flexible here and allow arbitrary iterable `args`. Because - # assertions are allowed to be dicts, and therefore each `args` needs to be - # hashable in that case, we are restricting to tuples currently. - # However, this function could be friendlier and just _convert_ each of the - # `args` to a tuple, thus eliminating any downstream issues. - assert ( - inputs - and isinstance(inputs, list) - and all(isinstance(args, tuple) for args in inputs) - ), "need a list of inputs with at least one args and all args must be tuples" - - assertions = scenario.get("assertions", {}) - if assertions: - assert isinstance(assertions, dict), f"assertions must be a dict" - - for key in assertions: - assert isinstance(key, DRProp) and SequenceAssertion.mode_has_assertion( - mode, key - ), f"each key must be a DryrunAssertionTypes appropriate to {mode}. 
This is not the case for key {key}" - - return inputs, assertions diff --git a/blackbox/invariant.py b/blackbox/invariant.py new file mode 100644 index 00000000..a135639f --- /dev/null +++ b/blackbox/invariant.py @@ -0,0 +1,150 @@ +from inspect import signature +from typing import Any, Callable, Dict, List, Tuple, Union + +from blackbox.blackbox import ( + DryRunInspector, + DryRunProperty, + ExecutionMode, + mode_has_property, +) + + +class Invariant: + """Enable asserting invariants on a sequence of dry run executions""" + + def __init__( + self, + predicate: Union[Dict[Tuple, Union[str, int]], Callable], + enforce: bool = False, + name: str = None, + ): + self.definition = predicate + self.predicate, self._expected = self.prepare_predicate(predicate) + self.enforce = enforce + self.name = name + + def __repr__(self): + return f"SequenceAssertion({self.definition})"[:100] + + def __call__(self, args: list, actual: Union[str, int]) -> Tuple[bool, str]: + assertion = self.predicate(args, actual) + msg = "" + if not assertion: + msg = f"SequenceAssertion for '{self.name}' failed for for args {args}: actual is [{actual}] BUT expected [{self.expected(args)}]" + if self.enforce: + assert assertion, msg + + return assertion, msg + + def expected(self, args: list) -> Union[str, int]: + return self._expected(args) + + def dryrun_assert( + self, + inputs: List[list], + dryrun_results: List[DryRunInspector], + assert_type: DryRunProperty, + ): + N = len(inputs) + assert N == len( + dryrun_results + ), f"inputs (len={N}) and dryrun responses (len={len(dryrun_results)}) must have the same length" + + assert isinstance( + assert_type, DryRunProperty + ), f"assertions types must be DryRunAssertionType's but got [{assert_type}] which is a {type(assert_type)}" + + for i, args in enumerate(inputs): + res = dryrun_results[i] + actual = res.dig(assert_type) + ok, msg = self(args, actual) + assert ok, res.report(args, msg, row=i + 1) + + @classmethod + def prepare_predicate(cls, predicate): + if isinstance(predicate, dict): + return ( + lambda args, actual: predicate[args] == actual, + lambda args: predicate[args], + ) + + if not isinstance(predicate, Callable): + # constant function in this case: + return lambda _, actual: predicate == actual, lambda _: predicate + + try: + sig = signature(predicate) + except Exception as e: + raise Exception( + f"callable predicate {predicate} must have a signature" + ) from e + + N = len(sig.parameters) + assert N in (1, 2), f"predicate has the wrong number of paramters {N}" + + if N == 2: + return predicate, lambda _: predicate + + # N == 1: + return lambda args, actual: predicate(args) == actual, lambda args: predicate( + args + ) + + @classmethod + def inputs_and_assertions( + cls, scenario: Dict[str, Union[list, dict]], mode: ExecutionMode + ) -> Tuple[List[tuple], Dict[DryRunProperty, Any]]: + """ + Validate that a Blackbox Test Scenario has been properly constructed, and return back + its components which consist of **inputs** and _optional_ **assertions**. + + A scenario should adhere to the following schema: + ``` + { + "inputs": List[Tuple[Union[str, int], ...]], + "assertions": Dict[DryRunAssertionType, ...an assertion...] + } + + Each assertion is a map from _assertion type_ to be made on a dry run, + to the actual assertion. Actual assertions can be: + * simple python types - these are useful in the case of _constant_ assertions. + For example, if you want to assert that the `maxStackHeight` is 3, just use `3`. 
+ * dictionaries of type Dict[Tuple, Any] - these are useful when you just want to assert + a discrete set of input-output pairs. + For example, if you have 4 inputs that you want to assert are being squared, + you could use `{(2,): 4, (7,): 49, (13,): 169, (11,): 121}` + * functions which take a single variable. These are useful when you have a python "simulator" + for the assertions. + In the square example you could use `lambda args: args[0]**2` + * functions which take _two_ variables. These are useful when your assertion is more + subtle that out-and-out equality. For example, suppose you want to assert that the + `cost` of the dry run is `2*n` plus/minus 5 where `n` is the first arg of the input. Then + you could use `lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` + ``` + """ + assert isinstance( + scenario, dict + ), f"a Blackbox Scenario should be a dict but got a {type(scenario)}" + + inputs = scenario.get("inputs") + # TODO: we can be more flexible here and allow arbitrary Sequence `args`. Because + # assertions are allowed to be dicts, and therefore each `args` needs to be + # hashable in that case, we are restricting to tuples currently. + # However, this function could be friendlier and just _convert_ each of the + # `args` to a tuple, thus eliminating any downstream issues. + assert ( + inputs + and isinstance(inputs, list) + and all(isinstance(args, tuple) for args in inputs) + ), "need a list of inputs with at least one args and all args must be tuples" + + assertions = scenario.get("assertions", {}) + if assertions: + assert isinstance(assertions, dict), f"assertions must be a dict" + + for key in assertions: + assert isinstance(key, DryRunProperty) and mode_has_property( + mode, key + ), f"each key must be a DryrunAssertionTypes appropriate to {mode}. This is not the case for key {key}" + + return inputs, assertions diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index 14dc1732..c818a0a1 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -8,8 +8,9 @@ DryRunProperty as DRProp, DryRunInspector as DRR, ExecutionMode, - SequenceAssertion, + mode_has_property, ) +from blackbox.invariant import Invariant from tests.clients import get_algod @@ -349,7 +350,7 @@ def test_app_with_report(filebase: str): mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase] # 0. Validate that the scenarios are well defined: - inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) + inputs, assertions = Invariant.inputs_and_assertions(scenario, mode) algod = get_algod() @@ -381,13 +382,11 @@ def test_app_with_report(filebase: str): for i, type_n_assertion in enumerate(assertions.items()): assert_type, assertion = type_n_assertion - assert SequenceAssertion.mode_has_assertion( + assert mode_has_property( mode, assert_type ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE or MODIFY" - assertion = SequenceAssertion( - assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" - ) + assertion = Invariant(assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}") print( f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" ) @@ -541,7 +540,7 @@ def test_logicsig_with_report(filebase: str): mode, scenario = ExecutionMode.Signature, LOGICSIG_SCENARIOS[filebase] # 0. 
Validate that the scenarios are well defined: - inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) + inputs, assertions = Invariant.inputs_and_assertions(scenario, mode) algod = get_algod() @@ -573,13 +572,11 @@ def test_logicsig_with_report(filebase: str): for i, type_n_assertion in enumerate(assertions.items()): assert_type, assertion = type_n_assertion - assert SequenceAssertion.mode_has_assertion( + assert mode_has_property( mode, assert_type ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE of MODIFY" - assertion = SequenceAssertion( - assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}" - ) + assertion = Invariant(assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}") print( f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" ) From 980790dc79510b2c0f00f6cd0cda7bc37c224fa1 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 08:58:24 -0500 Subject: [PATCH 30/85] thru step 8 --- README.md | 54 ++++++++++++-------------- blackbox/blackbox.py | 12 +++--- tests/integration/doc_examples_test.py | 26 ++++++++++++- 3 files changed, 56 insertions(+), 36 deletions(-) diff --git a/README.md b/README.md index da8fd907..491552a4 100644 --- a/README.md +++ b/README.md @@ -46,10 +46,10 @@ We'd like to write some unit tests to validate its correctness and make **assert * stack's height * scratch variables * final log message (this is especially useful for [ABI-compliant programs](https://developer.algorand.org/docs/get-details/dapps/smart-contracts/ABI/)) -* status (**PASS**, **REJECT** or _erroring_) +* status (**PASS** or **REJECT**) * error conditions that are and aren't encountered -Even better, before making fine-grained assertions we'd like to get a sense of what the program is doing on a large set of inputs and discover _experimentally_ these program invariants. Let's go through how we can do this: +Even better, before making fine-grained assertions we'd like to get a sense of what the program is doing on a large set of inputs and discover _experimentally_ these **program invariants**. Let's go through how we can do this: * start by making basic assertions and validate them using dry runs (see "**Basic Assertions**" section below) * execute the program on a sequence of inputs and explore the results (see "**EDRA: Exploratory Dry Run Analysis**" section below) @@ -61,7 +61,7 @@ Even better, before making fine-grained assertions we'd like to get a sense of w **STEP 1**. Start with a running local node and make note of Algod's port number (for our [standard sandbox](https://github.com/algorand/sandbox) this is `4001`) -**STEP 2**. Set the `ALGOD_PORT` value in [tests/clients.py](./tests/clients.py#L7) to this port number. (The port is already pre-set to `4001` because [graviton](https://github.com/algorand/graviton)'s CI process uses the standad sandbox) +**STEP 2**. Set the `ALGOD_PORT` value in [tests/clients.py](./tests/clients.py#L7) to this port number. (The port is already pre-set to `4001` because [graviton](https://github.com/algorand/graviton)'s [CI process](https://en.wikipedia.org/wiki/Continuous_integration) uses the standad sandbox) ### TEAL Program for Testing: Logic Sig v. 
App @@ -88,11 +88,11 @@ retsub""" ### The TEAL Blackbox Toolkit's Utitlity Classes -The TEAL Blackbox Toolkit comes with the following utility classes: +The TEAL Blackbox Toolkit comes with the following main classes: -* `DryRunExecutor` - facility to execute dry run's on apps and logic sigs -* `DryRunInspector` - class encapsulating a single app or logic sig dry run transaction and for making assertions about the dry run -* `SequenceAssertion` - class for asserting invariants about a _sequence_ of dry run executions in a declarative fashion +* `DryRunExecutor` - executes dry run's for apps and logic sigs for one or more inputs +* `DryRunInspector` - encapsulates a dry run's result for a single input and allows inspecting and making assertions about it +* `Invariant` - class for asserting invariants about a _sequence_ of dry run executions in a declarative fashion ### Basic Assertions @@ -111,15 +111,7 @@ from tests.clients import get_algod inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) assert inspector.status() == "PASS" assert inspector.stack_top() == x**2 - - print(inspector.stack_top()) - print(inspector.last_log()) - print(inspector.cost()) - print(inspector.status()) - print(inspector.final_scratch()) - print(inspector.error()) - print(inspector.max_stack_height()) - ``` +``` Some available _assertable properties_ are: @@ -240,25 +232,26 @@ print(csv) ``` Note: that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). -At this point, you'll be able to look at your [dry run sequence results](https://github.com/algorand/py-algorand-sdk/blob/1bc7b8fcf21401608cece65507c36d1f6dbad531/algosdk/testing/teal_blackbox.py#L713) and conduct some analysis. For the $`x^2`$ example if you load the CSV in Google sheets and reformat a bit it will look like: +At this point, you'll be able to look at your [dry run sequence results](./blackbox/blackbox.py#L752) and conduct some analysis. For the $`x^2`$ example, +after loading the CSV in Google sheets and reformating a bit it will look like: image -Perusing the above, it looks right: +Pointing out some interesting results: * column `D` **Arg 00** has the input $`x`$ (it's the argument at index 0) * column `A` contains the **Run** number -* column `E` **top of stack** does indeed store $`x^2`$ at the program's termination +* column `E` **top of stack** does stores the program's termination, i.e. $`x^2`$ * column `B` **Status** of each runs **PASS**es _except for **Run 1** with **Arg 00** = 0_. (The first run **REJECT**s because $`0^2 = 0`$ and TEAL programs reject when the top of the stack is 0) * column `G` shows scratch slot **s@000** which stores the value of $`x`$ (except for the case $`x = 0`$ in which appears empty; in fact, slots always default to the zero value and an **artifact** of dry-runs is that they do not report when 0-values get stored into previously empty slots as no state change actually occurs) * column `F` **max stack height** is always 2. The final observation makes sense because there is no branching or looping in the program. -**STEP 7**. We can re-cast these observed effects in `Columns E, B, G, F` as **program invariant conjectures** written in Python as follows: +**STEP 7**. 
We can re-cast the observed effects in `Columns E, B, G, F` as **program invariant conjectures** written in Python as follows: -* `dryrun_result.stack_top() == x ** 2` -* `dryrun_result.max_stack_height() == 2` -* `dryrun_result.status() == ("REJECT" if x == 0 else "PASS")` -* `dryrun_result.final_scratch() == ({} if x == 0 else {0: x})` +* `inspector.stack_top() == x ** 2` +* `inspector.max_stack_height() == 2` +* `inspector.status() == ("REJECT" if x == 0 else "PASS")` +* `inspector.final_scratch() == ({} if x == 0 else {0: x})` ### Advanced: Asserting Invariants on a Dry Run Sequence @@ -278,16 +271,19 @@ execution independently, or use `DryRunExecutor`'s convenience methods `dryrun_a $`x \leq 100`$: ```python +from blackbox.blackbox import DryRunExecutor +from tests.clients import get_algod + algod = get_algod() inputs = [(x,) for x in range(101)] dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) -for i, dryrun_result in enumerate(dryrun_results): +for i, inspector in enumerate(dryrun_results): args = inputs[i] x = args[0] - assert dryrun_result.stack_top() == x ** 2 - assert dryrun_result.max_stack_height() == 2 - assert dryrun_result.status() == ("REJECT" if x == 0 else "PASS") - assert dryrun_result.final_scratch() == ({} if x == 0 else {0: x}) + assert inspector.stack_top() == x**2 + assert inspector.max_stack_height() == 2 + assert inspector.status() == ("REJECT" if x == 0 else "PASS") + assert inspector.final_scratch() == ({} if x == 0 else {0: x}) ``` #### Declarative Blackbox Dry Run Sequence Assertions diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index b1df5209..3d15d140 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -375,9 +375,9 @@ class DryRunInspector: >>> algod = get_algod() >>> x = 9 >>> args = (x,) - >>> dryrun_result = DryRunExecutor.dryrun_logicsig(algod, teal, args) - >>> assert dryrun_result.status() == "PASS" - >>> assert dryrun_result.stack_stop() == x ** 2 + >>> inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) + >>> assert inspector.status() == "PASS" + >>> assert inspector.stack_stop() == x ** 2 ``` In the above we have asserted the the program has succesfully exited with status "PASS" and that the top of the stack contained $`x^2 = 9`$. @@ -743,13 +743,13 @@ def csv_report(cls, inputs: List[tuple], dr_resps: List["DryRunInspector"]) -> s ```python >>> algod = get_algod() >>> inputs = [(x,) for x in range(11)] # [(0), (1), ... 
, (10)] - >>> dryrun_results = DryRunExecutor.dryrun_app_on_sequence(algod, teal, inputs) - >>> csv = DryRunInspector.csv_report(inputs, dryrun_results) + >>> run_results = DryRunExecutor.dryrun_app_on_sequence(algod, teal, inputs) + >>> csv = DryRunInspector.csv_report(inputs, run_results) >>> print(csv) ``` Then you would get the following output: ```plain - Run, Status, cost, final_message, last_log, top_of_stack,Arg_00,max_stack_height,s@000,s@001,steps + Run, Status, cost, final_message, last_log, top_of_stack,Arg_00,max_stack_height,s@000,s@001,steps 1,REJECT,14,REJECT,`None,0,0,2,,,15 2,PASS,14,PASS,`0000000000000001,1,1,2,1,1,15 3,PASS,14,PASS,`0000000000000004,4,2,2,2,4,15 diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 57c97e9b..f1eaede8 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -114,7 +114,7 @@ def remove_whitespace(s): assert remove_whitespace(expected) == remove_whitespace(ae.exconly()) -def test_step6(): +def test_step6_and_7(): from blackbox.blackbox import DryRunExecutor, DryRunInspector from tests.clients import get_algod @@ -123,3 +123,27 @@ def test_step6(): run_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) csv = DryRunInspector.csv_report(inputs, run_results) print(csv) + + for i, inspector in enumerate(run_results): + args = inputs[i] + x = args[0] + inspector.stack_top() == x**2 + inspector.max_stack_height() == 2 + inspector.status() == ("REJECT" if x == 0 else "PASS") + inspector.final_scratch() == ({} if x == 0 else {0: x}) + + +def test_step8(): + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod + + algod = get_algod() + inputs = [(x,) for x in range(101)] + dryrun_results = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) + for i, inspector in enumerate(dryrun_results): + args = inputs[i] + x = args[0] + assert inspector.stack_top() == x**2 + assert inspector.max_stack_height() == 2 + assert inspector.status() == ("REJECT" if x == 0 else "PASS") + assert inspector.final_scratch() == ({} if x == 0 else {0: x}) From a8c7eab729a36503948849674ea55995d5fc4ec1 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 10:15:27 -0500 Subject: [PATCH 31/85] sequence assertions -> invariants and pass all the doc tests --- README.md | 83 ++++++++++++++------------ blackbox/invariant.py | 69 +++++++++++---------- tests/integration/blackbox_test.py | 74 ++++++++++++----------- tests/integration/doc_examples_test.py | 33 ++++++++++ 4 files changed, 154 insertions(+), 105 deletions(-) diff --git a/README.md b/README.md index 491552a4..f1ac8521 100644 --- a/README.md +++ b/README.md @@ -231,8 +231,8 @@ csv = DryRunInspector.csv_report(inputs, run_results) print(csv) ``` -Note: that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). -At this point, you'll be able to look at your [dry run sequence results](./blackbox/blackbox.py#L752) and conduct some analysis. For the $`x^2`$ example, +Note that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). 
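+For instance, here is a minimal sketch of the tuple convention, reusing the `teal` program and the `get_algod` helper from the earlier steps (the two-argument and zero-argument variants below are hypothetical programs, shown only to illustrate the shape of `inputs`):
+
+```python
+from blackbox.blackbox import DryRunExecutor
+from tests.clients import get_algod
+
+algod = get_algod()
+
+# a one-argument program: each args tuple holds exactly one element
+single_arg_inputs = [(x,) for x in range(5)]  # (0,), (1,), ..., (4,)
+
+# a hypothetical two-argument program would be fed pairs instead:
+# two_arg_inputs = [(x, y) for x in range(3) for y in range(3)]
+
+# a hypothetical zero-argument program is exercised with the empty tuple:
+# no_arg_inputs = [()]
+
+inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, single_arg_inputs)
+assert len(inspectors) == len(single_arg_inputs)
+```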
+At this point, you'll be able to look at your [dry run sequence results](./blackbox/blackbox.py#L752) and conduct some analysis. For the $`x^2`$ example, after loading the CSV in Google sheets and reformating a bit it will look like: image @@ -246,7 +246,7 @@ Pointing out some interesting results: * column `G` shows scratch slot **s@000** which stores the value of $`x`$ (except for the case $`x = 0`$ in which appears empty; in fact, slots always default to the zero value and an **artifact** of dry-runs is that they do not report when 0-values get stored into previously empty slots as no state change actually occurs) * column `F` **max stack height** is always 2. The final observation makes sense because there is no branching or looping in the program. -**STEP 7**. We can re-cast the observed effects in `Columns E, B, G, F` as **program invariant conjectures** written in Python as follows: +**STEP 7**. We can re-cast the observed effects in `Columns E, B, G, F` as **invariants** written in Python as follows: * `inspector.stack_top() == x ** 2` * `inspector.max_stack_height() == 2` @@ -255,16 +255,16 @@ Pointing out some interesting results: ### Advanced: Asserting Invariants on a Dry Run Sequence -The final and most advanced topic of this Howto is to turn _program invariant conjectures_ into -**sequence assertions**. That is, let's take the information we gleaned in our EDRA CSV report, +The final and most advanced topic we'll cover is how +to assert that invariants hold on a sequence of inputs. Lets take the information we gleaned in our EDRA CSV report, and create an integration test out of it. There are two ways to achieve this goal: -* Procedural sequence assertions -* Declarative sequence assertions +* Procedural invariant assertions +* Declarative invariant assertions #### Procedural Blackbox Dry Run Sequence Assertions -**STEP 8**. The procedural approach takes the _program invariant conjectures_ and simply asserts them +**STEP 8**. The procedural approach takes the _invariants_ and simply asserts them inside of a for loop that iterates over the inputs and dry runs. One can call each dry run execution independently, or use `DryRunExecutor`'s convenience methods `dryrun_app_on_sequence()` and `dryrun_logicsig_on_sequence()`. For example, let's assert that the above invariants hold for all @@ -289,41 +289,41 @@ for i, inspector in enumerate(dryrun_results): #### Declarative Blackbox Dry Run Sequence Assertions **STEP 9**. The TEAL Blackbox Toolkit also allows for declarative style test writing. -Let's look at some sample assertions for our `lsig_square` TEAL program: +Let's define some invariants for a particular +sequence of `lsig_square` TEAL program dry runs: ```python - "lsig_square": { - "inputs": [(i,) for i in range(100)], - "assertions": { - DRProp.stackTop: lambda args: args[0] ** 2, - DRProp.maxStackHeight: 2, - DRProp.status: lambda i: "REJECT" if i[0] = 0 else "PASS", - DRProp.finalScratch: lambda args: ({} if args[0] else {0: args[0]}), - }, +scenario = { + "inputs": [(i,) for i in range(100)], + "invariants": { + DRProp.stackTop: lambda args: args[0] ** 2, + DRProp.maxStackHeight: 2, + DRProp.status: lambda i: "REJECT" if i[0] = 0 else "PASS", + DRProp.finalScratch: lambda args: ({} if args[0] else {0: args[0]}), }, +} ``` In the parlance of the TEAL Blackbox Toolkit, a set of such declarative assertions -is called a **test scenario**. 
Scenarios are dict's containing two keys `inputs` and `assertions` and follow [certain conventions](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L942). In particular: +is called a **test scenario**. Scenarios are dict's containing two keys `inputs` and `invariants` and follow [certain conventions](./blackbox/invariant.py#L101). In particular: -* **inputs** are lists of tuples, each tuple representing the `args` to be fed into a single dry run execution -* **assertions** are dicts that map [DryRunProperty](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L20)'s to actual assertions -* here is a [live example scenario](https://github.com/algorand/py-algorand-sdk/blob/c6e91b86acf545b66a94d27581d6cfa6318206fc/x/blackbox/blackbox_test.py#L442) for $`x^2`$ +* **inputs** gives a list of tuples, each tuple representing the `args` to be fed into a single dry run execution +* **invariants** gives a dict that maps [DryRunProperty](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L20)'s to an invariant _predicate_ In English, letting $`x`$ be the input variable for our square function, the above **test scenario**: * provides a list of 100 tuples of the form $`(x)`$ that will serve as args. * IE: $`(0), (1), (2), ... , (99)`$ -* establishes 4 different _sequence assertions_ as follows: +* establishes 4 different _invariants_ as follows: * the **stack's top** will contain $`x^2`$ * the **max stack height** during execution is always 2 * the executions' **status** is **PASS** except for the case $`x=0`$ * the **final scratch** will have $`x`$ stored at slot `0` except for that strange $`x=0`$ case (recall the [0-val scratch slot artifact](#0val-artifact)) -Declarative sequence assertions make use of the following: +Declarative invariants make use of the following: * `DryRunProperty` (aka `DRProp`): an enum that acts as a key in a scenario's assertions dict -* class `SequenceAssertion` +* class `Invariant` * its constructor takes in a predicate (there are [4 kinds of predicates](#predicate)) and returns a callable that is used for runtime assertions * method `inputs_and_assertions()` validates a scenario and extracts out its assertions * method `dryrun_assert()` evaluates the dry-run sequence using the constructed `SequenceAssertion` @@ -331,30 +331,37 @@ Declarative sequence assertions make use of the following: To employ the declarative test scenario above write the following: ```python +from blackbox.blackbox import ( + DryRunExecutor, + DryRunProperty as DRProp, + ExecutionMode, +) +from blackbox.invariant import Invariant +from tests.clients import get_algod + algod = get_algod() scenario = { "inputs": [(i,) for i in range(100)], - "assertions": { + "invariants": { DRProp.stackTop: lambda args: args[0] ** 2, DRProp.maxStackHeight: 2, - DRProp.status: lambda i: "REJECT" if i[0] = 0 else "PASS", - DRProp.finalScratch: lambda args: ({} if args[0] else {0: args[0]}), + DRProp.status: lambda i: "REJECT" if i[0] == 0 else "PASS", + DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), }, -}, -mode = ExecutionMode.Signature +} -# Validate the scenario and dig out inputs/assertions: -inputs, assertions = SequenceAssertion.inputs_and_assertions(scenario, mode) +# Validate the scenario and dig out inputs/invariants: +inputs, invariants = 
Invariant.inputs_and_invariants( + scenario, ExecutionMode.Signature +) -# Execute the dry runs and obtain sequence of DryRunInspectors: -dryrun_results = Executor.dryrun_logicsig_on_sequence(algod, teal, inputs) +# Execute the dry runs and obtain a sequence of DryRunInspectors: +inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) -# Sequence assertions: -for i, prop_n_predicate in enumerate(assertions.items()): - property, predicate = prop_n_predicate - assertion = SequenceAssertion(predicate) - assertion.dryrun_assert(inputs, dryrun_results, property) +# Invariant assertions on sequence: +for property, invariant in invariants.items(): + invariant.validates(property, inputs, inspectors) ``` **STEP 10**. _**Deep Dive into Sequence Assertion via Exercises**_ diff --git a/blackbox/invariant.py b/blackbox/invariant.py index a135639f..b49ac69d 100644 --- a/blackbox/invariant.py +++ b/blackbox/invariant.py @@ -24,39 +24,39 @@ def __init__( self.name = name def __repr__(self): - return f"SequenceAssertion({self.definition})"[:100] + return f"Invariant({self.definition})"[:100] def __call__(self, args: list, actual: Union[str, int]) -> Tuple[bool, str]: - assertion = self.predicate(args, actual) + invariant = self.predicate(args, actual) msg = "" - if not assertion: - msg = f"SequenceAssertion for '{self.name}' failed for for args {args}: actual is [{actual}] BUT expected [{self.expected(args)}]" + if not invariant: + msg = f"Invariant for '{self.name}' failed for for args {args}: actual is [{actual}] BUT expected [{self.expected(args)}]" if self.enforce: - assert assertion, msg + assert invariant, msg - return assertion, msg + return invariant, msg def expected(self, args: list) -> Union[str, int]: return self._expected(args) - def dryrun_assert( + def validates( self, + property: DryRunProperty, inputs: List[list], - dryrun_results: List[DryRunInspector], - assert_type: DryRunProperty, + inspectors: List[DryRunInspector], ): N = len(inputs) assert N == len( - dryrun_results - ), f"inputs (len={N}) and dryrun responses (len={len(dryrun_results)}) must have the same length" + inspectors + ), f"inputs (len={N}) and dryrun responses (len={len(inspectors)}) must have the same length" assert isinstance( - assert_type, DryRunProperty - ), f"assertions types must be DryRunAssertionType's but got [{assert_type}] which is a {type(assert_type)}" + property, DryRunProperty + ), f"invariants types must be DryRunProperty's but got [{property}] which is a {type(property)}" for i, args in enumerate(inputs): - res = dryrun_results[i] - actual = res.dig(assert_type) + res = inspectors[i] + actual = res.dig(property) ok, msg = self(args, actual) assert ok, res.report(args, msg, row=i + 1) @@ -91,32 +91,35 @@ def prepare_predicate(cls, predicate): ) @classmethod - def inputs_and_assertions( - cls, scenario: Dict[str, Union[list, dict]], mode: ExecutionMode + def inputs_and_invariants( + cls, + scenario: Dict[str, Union[list, dict]], + mode: ExecutionMode, + raw_predicates: bool = False, ) -> Tuple[List[tuple], Dict[DryRunProperty, Any]]: """ Validate that a Blackbox Test Scenario has been properly constructed, and return back - its components which consist of **inputs** and _optional_ **assertions**. + its components which consist of **inputs** and _optional_ **invariants**. A scenario should adhere to the following schema: ``` { "inputs": List[Tuple[Union[str, int], ...]], - "assertions": Dict[DryRunAssertionType, ...an assertion...] 
+ "invariants": Dict[DryRuninvariantType, ...an invariant...] } - Each assertion is a map from _assertion type_ to be made on a dry run, - to the actual assertion. Actual assertions can be: - * simple python types - these are useful in the case of _constant_ assertions. + Each invariants is a map from a _dryrun property_ to assert about + to the actual invariant. Actual invariants can be: + * simple python types - these are useful in the case of _constant_ invariants. For example, if you want to assert that the `maxStackHeight` is 3, just use `3`. * dictionaries of type Dict[Tuple, Any] - these are useful when you just want to assert a discrete set of input-output pairs. For example, if you have 4 inputs that you want to assert are being squared, you could use `{(2,): 4, (7,): 49, (13,): 169, (11,): 121}` * functions which take a single variable. These are useful when you have a python "simulator" - for the assertions. + for the invariant. In the square example you could use `lambda args: args[0]**2` - * functions which take _two_ variables. These are useful when your assertion is more + * functions which take _two_ variables. These are useful when your invariant is more subtle that out-and-out equality. For example, suppose you want to assert that the `cost` of the dry run is `2*n` plus/minus 5 where `n` is the first arg of the input. Then you could use `lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` @@ -127,8 +130,8 @@ def inputs_and_assertions( ), f"a Blackbox Scenario should be a dict but got a {type(scenario)}" inputs = scenario.get("inputs") - # TODO: we can be more flexible here and allow arbitrary Sequence `args`. Because - # assertions are allowed to be dicts, and therefore each `args` needs to be + # TODO: we can be more flexible here and allow arbitrary iterable `args`. Because + # invariants are allowed to be dicts, and therefore each `args` needs to be # hashable in that case, we are restricting to tuples currently. # However, this function could be friendlier and just _convert_ each of the # `args` to a tuple, thus eliminating any downstream issues. @@ -138,13 +141,15 @@ def inputs_and_assertions( and all(isinstance(args, tuple) for args in inputs) ), "need a list of inputs with at least one args and all args must be tuples" - assertions = scenario.get("assertions", {}) - if assertions: - assert isinstance(assertions, dict), f"assertions must be a dict" + invariants = {} + predicates = scenario.get("invariants", {}) + if predicates: + assert isinstance(predicates, dict), f"invariants must be a dict" - for key in assertions: + for key, predicate in predicates.items(): assert isinstance(key, DryRunProperty) and mode_has_property( mode, key - ), f"each key must be a DryrunAssertionTypes appropriate to {mode}. This is not the case for key {key}" + ), f"each key must be a DryRunProperty's appropriate to {mode}. 
This is not the case for key {key}" + invariants[key] = Invariant(predicate, name=key) - return inputs, assertions + return inputs, predicates if raw_predicates else invariants diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index c818a0a1..11bece36 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -48,7 +48,7 @@ def fib_cost(args): return cost -def test_singleton_assertions(): +def test_singleton_invariants(): algod = get_algod() algod_status = algod.status() assert algod_status @@ -179,10 +179,10 @@ def prop_assert(dr_resp, actual, expected): "app_exp": { "inputs": [()], # since only a single input, just assert a constant in each case - "assertions": { + "invariants": { DRProp.cost: 11, DRProp.lastLog: Encoder.hex(2**10), - # dicts have a special meaning as assertions. So in the case of "finalScratch" + # dicts have a special meaning as invariants. So in the case of "finalScratch" # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around DRProp.finalScratch: lambda _: {0: 2**10}, DRProp.stackTop: 2**10, @@ -195,7 +195,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_square_byref": { "inputs": [(i,) for i in range(100)], - "assertions": { + "invariants": { DRProp.cost: lambda _, actual: 20 < actual < 22, DRProp.lastLog: Encoder.hex(1337), # due to dry-run artifact of not reporting 0-valued scratchvars, @@ -213,7 +213,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_square": { "inputs": [(i,) for i in range(100)], - "assertions": { + "invariants": { DRProp.cost: 14, DRProp.lastLog: { # since execution REJECTS for 0, expect last log for this case to be None @@ -233,7 +233,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_swap": { "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], - "assertions": { + "invariants": { DRProp.cost: 27, DRProp.lastLog: Encoder.hex(1337), DRProp.finalScratch: lambda args: { @@ -254,7 +254,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_string_mult": { "inputs": [("xyzw", i) for i in range(100)], - "assertions": { + "invariants": { DRProp.cost: lambda args: 30 + 15 * args[1], DRProp.lastLog: ( lambda args: Encoder.hex(args[0] * args[1]) if args[1] else None @@ -286,7 +286,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_oldfac": { "inputs": [(i,) for i in range(25)], - "assertions": { + "invariants": { DRProp.cost: lambda args, actual: ( actual - 40 <= 17 * args[0] <= actual + 40 ), @@ -314,7 +314,7 @@ def prop_assert(dr_resp, actual, expected): }, "app_slow_fibonacci": { "inputs": [(i,) for i in range(18)], - "assertions": { + "invariants": { DRProp.cost: lambda args: (fib_cost(args) if args[0] < 17 else 70_000), DRProp.lastLog: lambda args: ( Encoder.hex(fib(args[0])) if 0 < args[0] < 17 else None @@ -350,7 +350,9 @@ def test_app_with_report(filebase: str): mode, scenario = ExecutionMode.Application, APP_SCENARIOS[filebase] # 0. Validate that the scenarios are well defined: - inputs, assertions = Invariant.inputs_and_assertions(scenario, mode) + inputs, invariants = Invariant.inputs_and_invariants( + scenario, mode, raw_predicates=True + ) algod = get_algod() @@ -378,27 +380,27 @@ def test_app_with_report(filebase: str): print(f"Saved Dry Run CSV report to {csvpath}") - # 4. Sequential assertions (if provided any) - for i, type_n_assertion in enumerate(assertions.items()): - assert_type, assertion = type_n_assertion + # 4. 
Sequential invariants (if provided any) + for i, type_n_invariant in enumerate(invariants.items()): + property, invariant = type_n_invariant assert mode_has_property( - mode, assert_type - ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE or MODIFY" + mode, property + ), f"assert_type {property} is not applicable for {mode}. Please REMOVE or MODIFY" - assertion = Invariant(assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}") + invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{property}") print( - f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" + f"{i+1}. Semantic invariant for {case_name}-{mode}: {property} <<{invariant}>>" ) - assertion.dryrun_assert(inputs, dryrun_results, assert_type) + invariant.validates(property, inputs, dryrun_results) # NOTE: logic sig dry runs are missing some information when compared with app dry runs. -# Therefore, certain assertions don't make sense for logic sigs explaining why some of the below are commented out: +# Therefore, certain invariants don't make sense for logic sigs explaining why some of the below are commented out: LOGICSIG_SCENARIOS = { "lsig_exp": { "inputs": [()], - "assertions": { + "invariants": { # DRA.cost: 11, # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), DRProp.finalScratch: lambda _: {}, @@ -412,7 +414,7 @@ def test_app_with_report(filebase: str): }, "lsig_square_byref": { "inputs": [(i,) for i in range(100)], - "assertions": { + "invariants": { # DRA.cost: lambda _, actual: 20 < actual < 22, # DRA.lastLog: lightly_encode_output(1337, logs=True), # due to dry-run artifact of not reporting 0-valued scratchvars, @@ -428,7 +430,7 @@ def test_app_with_report(filebase: str): }, "lsig_square": { "inputs": [(i,) for i in range(100)], - "assertions": { + "invariants": { # DRA.cost: 14, # DRA.lastLog: {(i,): lightly_encode_output(i * i, logs=True) if i else None for i in range(100)}, DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), @@ -442,7 +444,7 @@ def test_app_with_report(filebase: str): }, "lsig_swap": { "inputs": [(1, 2), (1, "two"), ("one", 2), ("one", "two")], - "assertions": { + "invariants": { # DRA.cost: 27, # DRA.lastLog: lightly_encode_output(1337, logs=True), DRProp.finalScratch: lambda args: { @@ -461,7 +463,7 @@ def test_app_with_report(filebase: str): }, "lsig_string_mult": { "inputs": [("xyzw", i) for i in range(100)], - "assertions": { + "invariants": { # DRA.cost: lambda args: 30 + 15 * args[1], # DRA.lastLog: lambda args: lightly_encode_output(args[0] * args[1]) if args[1] else None, DRProp.finalScratch: lambda args: ( @@ -487,7 +489,7 @@ def test_app_with_report(filebase: str): }, "lsig_oldfac": { "inputs": [(i,) for i in range(25)], - "assertions": { + "invariants": { # DRA.cost: lambda args, actual: actual - 40 <= 17 * args[0] <= actual + 40, # DRA.lastLog: lambda args, actual: (actual is None) or (int(actual, base=16) == fac_with_overflow(args[0])), DRProp.finalScratch: lambda args: ( @@ -507,7 +509,7 @@ def test_app_with_report(filebase: str): }, "lsig_slow_fibonacci": { "inputs": [(i,) for i in range(18)], - "assertions": { + "invariants": { # DRA.cost: fib_cost, # DRA.lastLog: fib_last_log, # by returning True for n >= 15, we're declaring that we don't care about the scratchvar's for such cases: @@ -540,7 +542,9 @@ def test_logicsig_with_report(filebase: str): mode, scenario = ExecutionMode.Signature, LOGICSIG_SCENARIOS[filebase] # 0. 
Validate that the scenarios are well defined: - inputs, assertions = Invariant.inputs_and_assertions(scenario, mode) + inputs, invariants = Invariant.inputs_and_invariants( + scenario, mode, raw_predicates=True + ) algod = get_algod() @@ -568,16 +572,16 @@ def test_logicsig_with_report(filebase: str): print(f"Saved Dry Run CSV report to {csvpath}") - # 4. Sequential assertions (if provided any) - for i, type_n_assertion in enumerate(assertions.items()): - assert_type, assertion = type_n_assertion + # 4. Sequential invariants (if provided any) + for i, type_n_invariant in enumerate(invariants.items()): + property, invariant = type_n_invariant assert mode_has_property( - mode, assert_type - ), f"assert_type {assert_type} is not applicable for {mode}. Please REMOVE of MODIFY" + mode, property + ), f"assert_type {property} is not applicable for {mode}. Please REMOVE of MODIFY" - assertion = Invariant(assertion, name=f"{case_name}[{i}]@{mode}-{assert_type}") + invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{property}") print( - f"{i+1}. Semantic assertion for {case_name}-{mode}: {assert_type} <<{assertion}>>" + f"{i+1}. Semantic invariant for {case_name}-{mode}: {property} <<{invariant}>>" ) - assertion.dryrun_assert(inputs, dryrun_results, assert_type) + invariant.validates(property, inputs, dryrun_results) diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index f1eaede8..776dc4a6 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -147,3 +147,36 @@ def test_step8(): assert inspector.max_stack_height() == 2 assert inspector.status() == ("REJECT" if x == 0 else "PASS") assert inspector.final_scratch() == ({} if x == 0 else {0: x}) + + +def test_step9(): + from blackbox.blackbox import ( + DryRunExecutor, + DryRunProperty as DRProp, + ExecutionMode, + ) + from blackbox.invariant import Invariant + from tests.clients import get_algod + + algod = get_algod() + + scenario = { + "inputs": [(i,) for i in range(100)], + "invariants": { + DRProp.stackTop: lambda args: args[0] ** 2, + DRProp.maxStackHeight: 2, + DRProp.status: lambda i: "REJECT" if i[0] == 0 else "PASS", + DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), + }, + } + # Validate the scenario and dig out inputs/invariants: + inputs, invariants = Invariant.inputs_and_invariants( + scenario, ExecutionMode.Signature + ) + + # Execute the dry runs and obtain sequence of DryRunInspectors: + inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) + + # Invariant assertions on sequence: + for property, invariant in invariants.items(): + invariant.validates(property, inputs, inspectors) From e36cea77be5edf281eb84bd703aec59dfc4ac39e Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 11:09:19 -0500 Subject: [PATCH 32/85] final draft --- README.md | 30 +++++++++++++++++------------- 1 file changed, 17 insertions(+), 13 deletions(-) diff --git a/README.md b/README.md index f1ac8521..0b3458c4 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,5 @@ + # TEAL Blackbox Toolkit: Program Reporting and Testing via Dry Runs @@ -364,14 +365,18 @@ for property, invariant in invariants.items(): invariant.validates(property, inputs, inspectors) ``` -**STEP 10**. _**Deep Dive into Sequence Assertion via Exercises**_ +**STEP 10**. _**Deep Dive into Invariants via Exercises**_ + +Four kinds of predicates are used to define _invariants_: + +1. 
_simple python types_ - these are useful in the case of _constant_ invariants. In the above `maxStackHeight` is asserted to _**ALWAYS**_ equal 2 by using `2` in the declaration: + +`DRProp.maxStackHeight: 2` -There are 4 kinds of Sequence Assertions _aka_ predicates +2. _1-variable functions_ -these are useful when you have a python "simulator" for the invariant. In the above `stackTop` is asserted to be $`x^2`$ by using a lambda expression for $`x^2`$ in the declaration: + +`DRProp.stackTop: lambda args: args[0] ** 2` -1. _simple python types_ - these are useful in the case of _constant_ assertions. For example above, it was -asserted that `maxStackHeight` was _**ALWAYS**_ 2 by just using `2` in the declaration `DRProp.maxStackHeight: 2,` -2. _1-variable functions_ -these are useful when you have a python "simulator" for the assertable property. For example above it was asserted that `stackTop` was -$`x^2`$ by using a lambda expression for $`x^2`$ in the declaration `DRProp.stackTop: lambda args: args[0] ** 2,` 3. _dictionaries_ of type `Dict[Tuple, Any]` - these are useful when you want to assert a discrete set of input-output pairs. For example, if you have 4 inputs that you want to assert are being squared, you could use ```python @@ -379,16 +384,15 @@ DRProp.stackTop: { (2,): 4, (7,): 49, (13,): 169, - (11,): 121 -}, + (11,): 121, +} ``` ->Note that this case illustrates why `args` should be tuples intead of lists. In order to specify a map from args to expected, we need to make `args` a key ->in a dictionary: Python dictionary keys must be hashable and lists are **not hashable** while tuples _are_ hashable. +>Note that this case illustrates why `args` should be tuples intead of lists. In order to specify a map from args to expected, we need to make `args` a key in a dictionary. As Python dictionary keys must be hashable and lists are _not hashable_ while tuples _are_ hashable. - +4. _2-variable functions_ -these are useful when your assertion is more subtle than out-and-out equality. For example, suppose you want to assert that the `cost` of each run is _between_ $`2n \pm 5`$ where $`n`$ is the first arg of the input. Then you could declare: -4. _2-variable functions_ -these are useful when your assertion is more subtle than out-and-out equality. For example, suppose you want to assert that the `cost` of each run is _between_ $`2n \pm 5`$ where $`n`$ is the first arg of the input. Then you could declare `DRProp.cost: lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` +`DRProp.cost: lambda args, actual: 2*args[0] - 5 <= actual <= 2*args[0] + 5` #### **EXERCISE A** @@ -429,4 +433,4 @@ A few items to take note of: * **max stack height** is $`2n`$ except for $`n=0`$ and the error case * you can see the final values of scratch slots **s@000** and **s@001** which are respectively $`n`$ and `fibonacci(n)` -You can see how [sequence assertions can be made](https://github.com/algorand/py-algorand-sdk/blob/77addfc236e78e41e2fd761fd59b506d8d344346/x/blackbox/blackbox_test.py#L324) on this function. +Here's an example of how [invariants can be asserted](https://github.com/algorand/graviton/blob/a8c7eab729a36503948849674ea55995d5fc4ec1/tests/integration/blackbox_test.py#L315) on this function. From dfc88495437de258c048e64560140cfa37723937 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 12:38:25 -0500 Subject: [PATCH 33/85] fix links etc. 
--- README.md | 8 ++++---- blackbox/blackbox.py | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index 0b3458c4..48216922 100644 --- a/README.md +++ b/README.md @@ -124,12 +124,12 @@ Some available _assertable properties_ are: * `error()` * `max_stack_height()` -See the [DryRunInspector class comment](./blackbox/blackbox.py#L373) for more assertable properties and details. +See the [DryRunInspector class comment](./blackbox/blackbox.py#L387) for more assertable properties and details. ### Printing out the TEAL Stack Trace for a Failing Assertion **STEP 5**. The `DryRunInspector`'s `report()` method lets you print out -a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails: +a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails. ```python from blackbox.blackbox import DryRunExecutor @@ -153,7 +153,7 @@ assert expected == actual, inspector.report( ) ``` -If we run the test we'll a printout such as: +If we run the test we'll (e.g. with `pytest`) see a printout such as: ```sh AssertionError: @@ -309,7 +309,7 @@ In the parlance of the TEAL Blackbox Toolkit, a set of such declarative assertio is called a **test scenario**. Scenarios are dict's containing two keys `inputs` and `invariants` and follow [certain conventions](./blackbox/invariant.py#L101). In particular: * **inputs** gives a list of tuples, each tuple representing the `args` to be fed into a single dry run execution -* **invariants** gives a dict that maps [DryRunProperty](https://github.com/algorand/py-algorand-sdk/blob/3d3992ccc9b3758f28e68d2c00408d2e1363a3bb/algosdk/testing/teal_blackbox.py#L20)'s to an invariant _predicate_ +* **invariants** gives a dict that maps [DryRunProperty](./blackbox/blackbox.py#L25)'s to an invariant _predicate_ In English, letting $`x`$ be the input variable for our square function, the above **test scenario**: diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index 3d15d140..273be432 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -36,6 +36,9 @@ class DryRunProperty(Enum): localStateHas = auto() +DRProp = DryRunProperty + + def mode_has_property(mode: ExecutionMode, assertion_type: "DryRunProperty") -> bool: missing = { ExecutionMode.Signature: { @@ -50,9 +53,6 @@ def mode_has_property(mode: ExecutionMode, assertion_type: "DryRunProperty") -> return True -DRProp = DryRunProperty - - @dataclass class TealVal: i: int = 0 From 4da7415db3903943846bbb727b389febb6d91987 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 13:09:45 -0500 Subject: [PATCH 34/85] test the doc exercises too --- Makefile | 1 - tests/integration/doc_examples_test.py | 39 ++++++++++++++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index a52d868f..3499f8e1 100644 --- a/Makefile +++ b/Makefile @@ -41,7 +41,6 @@ mac-blackbox: mac-blackbox-smoke integration-test mac-gh-simulate: act - ###### Github Actions Only ###### gh-sandbox-test: diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 776dc4a6..e83e040b 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -180,3 +180,42 @@ def test_step9(): # Invariant assertions on 
sequence: for property, invariant in invariants.items(): invariant.validates(property, inputs, inspectors) + + +@pytest.mark.parametrize("exercise", ["A", "B"]) +def test_exercises(exercise): + from blackbox.blackbox import ( + DryRunExecutor, + DryRunProperty as DRProp, + ExecutionMode, + ) + from blackbox.invariant import Invariant + from tests.clients import get_algod + + algod = get_algod() + + status_predicate = ( + ({(x,): "PASS" if x else "REJECT" for x in range(100)}) + if exercise == "A" + else (lambda args, actual: "PASS" == actual if args[0] else True) + ) + scenario = { + "inputs": [(i,) for i in (2, 7, 13, 11)], + "invariants": { + DRProp.stackTop: {(2,): 4, (7,): 49, (13,): 169, (11,): 121}, + DRProp.maxStackHeight: 2, + DRProp.status: status_predicate, + DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), + }, + } + # Validate the scenario and dig out inputs/invariants: + inputs, invariants = Invariant.inputs_and_invariants( + scenario, ExecutionMode.Signature + ) + + # Execute the dry runs and obtain sequence of DryRunInspectors: + inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) + + # Invariant assertions on sequence: + for property, invariant in invariants.items(): + invariant.validates(property, inputs, inspectors) From c445857c665cb6c1474906a29b97d1fae643fae6 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 14:26:16 -0500 Subject: [PATCH 35/85] Update Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 3499f8e1..c7a67934 100644 --- a/Makefile +++ b/Makefile @@ -37,7 +37,7 @@ mac-blackbox-smoke: blackbox-smoke-prefix mac-sandbox-test mac-blackbox: mac-blackbox-smoke integration-test -# assumes you've installed act via `brew install act`: +# assumes act is installed, e.g. 
via `brew install act`: mac-gh-simulate: act From 82c8cff586b05558f63db4d585f5d6b71aa176be Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 14:26:22 -0500 Subject: [PATCH 36/85] Update Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index c7a67934..fcd7fc90 100644 --- a/Makefile +++ b/Makefile @@ -44,7 +44,7 @@ mac-gh-simulate: ###### Github Actions Only ###### gh-sandbox-test: - # allow exit code 2 as indexer returns 500 when last-round = 0 + # relax exit code condition because indexer returns 500 when last-round = 0 script -e -c "bash -x ./sandbox/sandbox test" || echo "finished ./sandbox test" gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test From 5351bd4a95e58375ec4507a407d04260cd38eb47 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 14:38:18 -0500 Subject: [PATCH 37/85] alignment --- README.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/README.md b/README.md index 48216922..6e74e531 100644 --- a/README.md +++ b/README.md @@ -106,12 +106,12 @@ When executing a dry run using `DryRunExecutor` you'll get back `DryRunInspecto from blackbox.blackbox import DryRunExecutor from tests.clients import get_algod - algod = get_algod() - x = 9 - args = (x,) - inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) - assert inspector.status() == "PASS" - assert inspector.stack_top() == x**2 +algod = get_algod() +x = 9 +args = (x,) +inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) +assert inspector.status() == "PASS" +assert inspector.stack_top() == x**2 ``` Some available _assertable properties_ are: From d41586c5dccc8a661cdc16996a319b15a158fedb Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 14:43:03 -0500 Subject: [PATCH 38/85] better explanation --- README.md | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/README.md b/README.md index 6e74e531..632aa983 100644 --- a/README.md +++ b/README.md @@ -114,6 +114,11 @@ assert inspector.status() == "PASS" assert inspector.stack_top() == x**2 ``` +Here we have executed a dry run on input $`x=9`$, then asserted that: + +* the program status was `PASS` +* the program exited with the top of its staack containing $`x^2 = 9^2 = 81`$ + Some available _assertable properties_ are: * `stack_top()` From c4026bc29c1ae48c2b7e979ef4ce80407cef5103 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Sat, 26 Mar 2022 14:53:23 -0500 Subject: [PATCH 39/85] final tweaks... realy --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 632aa983..51642fa6 100644 --- a/README.md +++ b/README.md @@ -117,7 +117,7 @@ assert inspector.stack_top() == x**2 Here we have executed a dry run on input $`x=9`$, then asserted that: * the program status was `PASS` -* the program exited with the top of its staack containing $`x^2 = 9^2 = 81`$ +* the program exited with the top of its stack containing $`x^2 = 9^2 = 81`$ Some available _assertable properties_ are: @@ -158,7 +158,7 @@ assert expected == actual, inspector.report( ) ``` -If we run the test we'll (e.g. with `pytest`) see a printout such as: +If we run the test (e.g. with `pytest`) we'll see a printout such as: ```sh AssertionError: @@ -393,7 +393,7 @@ DRProp.stackTop: { } ``` ->Note that this case illustrates why `args` should be tuples intead of lists. In order to specify a map from args to expected, we need to make `args` a key in a dictionary. 
As Python dictionary keys must be hashable and lists are _not hashable_ while tuples _are_ hashable. +>Note that this case illustrates why each `args` container should be a tuple intead of a list. In order to specify a map from args to expected, we need to make `args` a key in a dictionary. Python dictionary keys must be hashable and lists are _not hashable_ while tuples _are_ hashable, hence the tuple-requirement. 4. _2-variable functions_ -these are useful when your assertion is more subtle than out-and-out equality. For example, suppose you want to assert that the `cost` of each run is _between_ $`2n \pm 5`$ where $`n`$ is the first arg of the input. Then you could declare: From e9b29e8fed180da3d2f072817c35df2719eabfc2 Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 21:27:36 -0400 Subject: [PATCH 40/85] Ignore PyCharm files --- .gitignore | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.gitignore b/.gitignore index 8f43dc34..4b5d70de 100644 --- a/.gitignore +++ b/.gitignore @@ -144,3 +144,6 @@ dmypy.json # Pyre type checker .pyre/ + +# IDE +.idea From 42face96e600a8b61d9c4b2aa623dfee18524245 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 21:35:59 -0400 Subject: [PATCH 41/85] mac- --> local- and other experimental tweaks --- Makefile | 10 +++++----- __init__.py | 0 blackbox/blackbox.py | 2 +- blackbox/invariant.py | 2 +- pyproject.toml | 8 +++++--- requirements.txt | 1 + tests/integration/blackbox_test.py | 6 +++--- tests/integration/dryrun_mixin_docs_test.py | 4 ++-- 8 files changed, 18 insertions(+), 15 deletions(-) create mode 100644 __init__.py diff --git a/Makefile b/Makefile index fcd7fc90..1c1cfbf1 100644 --- a/Makefile +++ b/Makefile @@ -26,19 +26,19 @@ integration-test: # assumes installations of pipx, build and tox via: # `pip install pipx; pipx install build; pipx install tox` -mac-project-build: +local-project-build: pyproject-build # assumes a symbolic link: sandbox -> /cloned/repo/algorand/sandbox -mac-sandbox-test: +local-sandbox-test: ./sandbox/sandbox test -mac-blackbox-smoke: blackbox-smoke-prefix mac-sandbox-test +local-blackbox-smoke: blackbox-smoke-prefix local-sandbox-test -mac-blackbox: mac-blackbox-smoke integration-test +local-blackbox: local-blackbox-smoke integration-test # assumes act is installed, e.g. via `brew install act`: -mac-gh-simulate: +local-gh-simulate: act ###### Github Actions Only ###### diff --git a/__init__.py b/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index 273be432..e6bab37b 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -8,7 +8,7 @@ from algosdk.v2client.algod import AlgodClient -from blackbox.dryrun import ( +from ..blackbox.dryrun import ( ZERO_ADDRESS, assert_error, assert_no_error, diff --git a/blackbox/invariant.py b/blackbox/invariant.py index b49ac69d..201e1bbc 100644 --- a/blackbox/invariant.py +++ b/blackbox/invariant.py @@ -1,7 +1,7 @@ from inspect import signature from typing import Any, Callable, Dict, List, Tuple, Union -from blackbox.blackbox import ( +from ..blackbox.blackbox import ( DryRunInspector, DryRunProperty, ExecutionMode, diff --git a/pyproject.toml b/pyproject.toml index 53c55f08..eaf69e59 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,6 +1,8 @@ -[build-system] -requires = ["setuptools", "wheel"] -build-backend = "setuptools.build_meta" +# Maybe this is the culprit for why I can't get a proper +# build in dependant repos? 
+# [build-system] +# requires = ["setuptools", "wheel"] +# build-backend = "setuptools.build_meta" [metadata] name = "graviton" diff --git a/requirements.txt b/requirements.txt index 4f6bf643..98d98a90 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1 +1,2 @@ +. pytest==7.1.1 diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index 11bece36..f16ac090 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -2,7 +2,7 @@ import pytest -from blackbox.blackbox import ( +from ...blackbox.blackbox import ( DryRunEncoder as Encoder, DryRunExecutor as Executor, DryRunProperty as DRProp, @@ -10,9 +10,9 @@ ExecutionMode, mode_has_property, ) -from blackbox.invariant import Invariant +from ...blackbox.invariant import Invariant -from tests.clients import get_algod +from ...tests.clients import get_algod TESTS_DIR = Path.cwd() / "tests" diff --git a/tests/integration/dryrun_mixin_docs_test.py b/tests/integration/dryrun_mixin_docs_test.py index ddf16748..5027dee2 100644 --- a/tests/integration/dryrun_mixin_docs_test.py +++ b/tests/integration/dryrun_mixin_docs_test.py @@ -17,9 +17,9 @@ TealKeyValue, TealValue, ) -from blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper +from ...blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper -from tests.clients import get_algod +from ...tests.clients import get_algod def b64_encode_hack(s, b=None): From b1389af0b345f838f6b96ecc754b9b2aa7083f11 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 21:38:18 -0400 Subject: [PATCH 42/85] ... literally, in imports --- tests/integration/doc_examples_test.py | 28 +++++++++++++------------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index e83e040b..66146aad 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -18,8 +18,8 @@ def test_step4(): - from blackbox.blackbox import DryRunExecutor - from tests.clients import get_algod + from ...blackbox.blackbox import DryRunExecutor + from ...tests.clients import get_algod algod = get_algod() x = 9 @@ -38,8 +38,8 @@ def test_step4(): def test_step5(): - from blackbox.blackbox import DryRunExecutor - from tests.clients import get_algod + from ...blackbox.blackbox import DryRunExecutor + from ...tests.clients import get_algod algod = get_algod() x = 2 @@ -115,8 +115,8 @@ def remove_whitespace(s): def test_step6_and_7(): - from blackbox.blackbox import DryRunExecutor, DryRunInspector - from tests.clients import get_algod + from ...blackbox.blackbox import DryRunExecutor, DryRunInspector + from ...tests.clients import get_algod algod = get_algod() inputs = [(x,) for x in range(16)] @@ -134,8 +134,8 @@ def test_step6_and_7(): def test_step8(): - from blackbox.blackbox import DryRunExecutor - from tests.clients import get_algod + from ...blackbox.blackbox import DryRunExecutor + from ...tests.clients import get_algod algod = get_algod() inputs = [(x,) for x in range(101)] @@ -150,13 +150,13 @@ def test_step8(): def test_step9(): - from blackbox.blackbox import ( + from ...blackbox.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from blackbox.invariant import Invariant - from tests.clients import get_algod + from ...blackbox.invariant import Invariant + from ...tests.clients import get_algod algod = get_algod() @@ -184,13 +184,13 @@ def test_step9(): @pytest.mark.parametrize("exercise", ["A", "B"]) def 
test_exercises(exercise): - from blackbox.blackbox import ( + from ...blackbox.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from blackbox.invariant import Invariant - from tests.clients import get_algod + from ...blackbox.invariant import Invariant + from ...tests.clients import get_algod algod = get_algod() From 30510ca320fc071ac2101505240c97ff5d5ffc94 Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 22:01:41 -0400 Subject: [PATCH 43/85] Run integration tests on Python 3.10 --- .github/workflows/build.yml | 24 +++++++++++++++++------- 1 file changed, 17 insertions(+), 7 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e7790eac..e2a81e4d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,8 +8,9 @@ on: - main env: - CONFIG: dev - GENESIS: genesis/dev/genesis.json + PYTHON_VERSION: 3.10 + SANDBOX_CONFIG: dev + SANDBOX_GENESIS: genesis/dev/genesis.json jobs: build-test: @@ -17,7 +18,7 @@ jobs: container: python:${{ matrix.python }} strategy: matrix: - python: ['3.10'] + python: [ "${{ env.PYTHON_VERSION }}"] steps: - run: python3 --version - name: Check out code @@ -35,10 +36,19 @@ jobs: uses: actions/checkout@v2 with: fetch-depth: 0 + - uses: actions/setup-python@v3 + with: + python-version: ${{ env.PYTHON_VERSION }} + - name: Test Python version + run: | + installed="$(python --version)" + expected="${{ env.PYTHON_VERSION }}" + echo $installed + [[ $installed =~ "Python ${expected}" ]] && echo "Configured Python" || (echo "Failed to configure Python" && exit 1) - name: Install required os level applications run: | sudo apt update -y - sudo apt install -y curl git nodejs python-is-python3 python3-pip + sudo apt install -y curl git nodejs sudo apt -y install ca-certificates curl gnupg lsb-release sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo gpg --dearmor -o /usr/share/keyrings/docker-archive-keyring.gpg sudo echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/docker-archive-keyring.gpg] https://download.docker.com/linux/ubuntu \ @@ -56,12 +66,12 @@ jobs: # Ignore the failure of a step and avoid terminating the job. 
continue-on-error: true with: - key: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.CONFIG, env.GENESIS) }}-{hash} - restore-keys: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.CONFIG, env.GENESIS) }}- + key: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.SANDBOX_CONFIG, env.SANDBOX_GENESIS) }}-{hash} + restore-keys: docker-layer-caching-${{ github.workflow }}-${{ hashFiles(env.CONFIG, env.SANDBOX_GENESIS) }}- - name: Create sandbox uses: lucasvanmol/algorand-sandbox-action@v1 with: - config: ${{ env.CONFIG }} + config: ${{ env.SANDBOX_CONFIG }} - name: Setup integration test environment run: make pip-test unit-test - name: Run integration tests From 315560a99859b68d0a6d230e8c20cde590b4ceed Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 22:16:14 -0400 Subject: [PATCH 44/85] Try to make Python env value available --- .github/workflows/build.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e2a81e4d..0463cd01 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -16,6 +16,8 @@ jobs: build-test: runs-on: ubuntu-20.04 container: python:${{ matrix.python }} + env: + PYTHON_VERSION:"${{ env.PYTHON_VERSION }}" strategy: matrix: python: [ "${{ env.PYTHON_VERSION }}"] From b960a40553dd9f907751869d07d63bee577fae41 Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 22:17:50 -0400 Subject: [PATCH 45/85] Revert to hard-coded matrix version --- .github/workflows/build.yml | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0463cd01..a05063e6 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ on: - main env: - PYTHON_VERSION: 3.10 + PYTHON_VERSION: 3.10 # Duplicated in `build-test` due to Workflow limitations. SANDBOX_CONFIG: dev SANDBOX_GENESIS: genesis/dev/genesis.json @@ -16,11 +16,9 @@ jobs: build-test: runs-on: ubuntu-20.04 container: python:${{ matrix.python }} - env: - PYTHON_VERSION:"${{ env.PYTHON_VERSION }}" strategy: matrix: - python: [ "${{ env.PYTHON_VERSION }}"] + python: [ "3.10" ] steps: - run: python3 --version - name: Check out code From e73e624e4d1da52b00aca1f1bf112097b541aafc Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 22:20:27 -0400 Subject: [PATCH 46/85] Try quoting values to fix error --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a05063e6..3b54b93d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -38,7 +38,7 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v3 with: - python-version: ${{ env.PYTHON_VERSION }} + python-version: "${{ env.PYTHON_VERSION }}" - name: Test Python version run: | installed="$(python --version)" From b70e4c48e9c7a29dba6848038eb1e2c3db26352b Mon Sep 17 00:00:00 2001 From: michaeldiamant Date: Mon, 28 Mar 2022 22:21:56 -0400 Subject: [PATCH 47/85] Try quoting more values --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 3b54b93d..34ff00a2 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,7 @@ on: - main env: - PYTHON_VERSION: 3.10 # Duplicated in `build-test` due to Workflow limitations. 
+ PYTHON_VERSION: "3.10" # Duplicated in `build-test` due to Workflow limitations. SANDBOX_CONFIG: dev SANDBOX_GENESIS: genesis/dev/genesis.json From 55353175e07a0718450cd94e4d3eb378f6b40190 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 22:29:53 -0400 Subject: [PATCH 48/85] try packge_dir instead of packages --- setup.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index b47860e3..bd0cc7f4 100644 --- a/setup.py +++ b/setup.py @@ -14,12 +14,14 @@ long_description=long_description, long_description_content_type="text/markdown", url="https://github.com/algorand/graviton", - packages=setuptools.find_packages(), + # packages=setuptools.find_packages(), + package_dir={"": "."}, install_requires=["py-algorand-sdk", "tabulate==0.8.9"], classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], + package_data={"pyteal": ["*.pyi"]}, python_requires=">=3.8", ) From 06c141761dd0ab31288b04afa5bde8dfc8396f8c Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:10:29 -0400 Subject: [PATCH 49/85] try setup.py again --- Makefile | 6 +- __init__.py | 0 blackbox/blackbox.py | 2 +- blackbox/invariant.py | 2 +- setup.py | 62 +++++++++++++-------- tests/integration/blackbox_test.py | 6 +- tests/integration/doc_examples_test.py | 28 +++++----- tests/integration/dryrun_mixin_docs_test.py | 4 +- 8 files changed, 62 insertions(+), 48 deletions(-) delete mode 100644 __init__.py diff --git a/Makefile b/Makefile index 1c1cfbf1..f5adca0d 100644 --- a/Makefile +++ b/Makefile @@ -1,13 +1,9 @@ ####### Universal ###### -pip-publish: +pip: pip install -r requirements.txt pip install -e . -pip-test: - pip install -r requirements.txt - pip install . 
- unit-test: pytest -sv tests/unit diff --git a/__init__.py b/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/blackbox/blackbox.py b/blackbox/blackbox.py index e6bab37b..273be432 100644 --- a/blackbox/blackbox.py +++ b/blackbox/blackbox.py @@ -8,7 +8,7 @@ from algosdk.v2client.algod import AlgodClient -from ..blackbox.dryrun import ( +from blackbox.dryrun import ( ZERO_ADDRESS, assert_error, assert_no_error, diff --git a/blackbox/invariant.py b/blackbox/invariant.py index 201e1bbc..b49ac69d 100644 --- a/blackbox/invariant.py +++ b/blackbox/invariant.py @@ -1,7 +1,7 @@ from inspect import signature from typing import Any, Callable, Dict, List, Tuple, Union -from ..blackbox.blackbox import ( +from blackbox.blackbox import ( DryRunInspector, DryRunProperty, ExecutionMode, diff --git a/setup.py b/setup.py index bd0cc7f4..ab894800 100644 --- a/setup.py +++ b/setup.py @@ -1,27 +1,45 @@ -#!/usr/bin/env python3 - -import setuptools - -with open("README.md", "r") as fh: - long_description = fh.read() +from setuptools import setup -setuptools.setup( +setup( + python_requires=">=3.8", + install_requires=["py-algorand-sdk", "tabulate==0.8.9"], name="graviton", version="0.0.1", - author="Algorand", - author_email="pypiservice@algorand.com", - description="verify your TEAL program by experiment and observation", - long_description=long_description, - long_description_content_type="text/markdown", + description="TBD", + author="TBD", url="https://github.com/algorand/graviton", - # packages=setuptools.find_packages(), - package_dir={"": "."}, - install_requires=["py-algorand-sdk", "tabulate==0.8.9"], - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: MIT License", - "Operating System :: OS Independent", - ], - package_data={"pyteal": ["*.pyi"]}, - python_requires=">=3.8", + py_modules=["blackbox"], ) + + + + + + +#!/usr/bin/env python3 + +# import setuptools + +# with open("README.md", "r") as fh: +# long_description = fh.read() + +# setuptools.setup( +# name="graviton", +# version="0.0.1", +# author="Algorand", +# author_email="pypiservice@algorand.com", +# description="verify your TEAL program by experiment and observation", +# long_description=long_description, +# long_description_content_type="text/markdown", +# url="https://github.com/algorand/graviton", +# packages=setuptools.find_packages(), +# # package_dir={"": "."}, +# install_requires=["py-algorand-sdk", "tabulate==0.8.9"], +# classifiers=[ +# "Programming Language :: Python :: 3", +# "License :: OSI Approved :: MIT License", +# "Operating System :: OS Independent", +# ], +# package_data={"pyteal": ["*.pyi"]}, +# python_requires=">=3.8", +# ) diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index f16ac090..11bece36 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -2,7 +2,7 @@ import pytest -from ...blackbox.blackbox import ( +from blackbox.blackbox import ( DryRunEncoder as Encoder, DryRunExecutor as Executor, DryRunProperty as DRProp, @@ -10,9 +10,9 @@ ExecutionMode, mode_has_property, ) -from ...blackbox.invariant import Invariant +from blackbox.invariant import Invariant -from ...tests.clients import get_algod +from tests.clients import get_algod TESTS_DIR = Path.cwd() / "tests" diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 66146aad..e83e040b 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ 
-18,8 +18,8 @@ def test_step4(): - from ...blackbox.blackbox import DryRunExecutor - from ...tests.clients import get_algod + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod algod = get_algod() x = 9 @@ -38,8 +38,8 @@ def test_step4(): def test_step5(): - from ...blackbox.blackbox import DryRunExecutor - from ...tests.clients import get_algod + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod algod = get_algod() x = 2 @@ -115,8 +115,8 @@ def remove_whitespace(s): def test_step6_and_7(): - from ...blackbox.blackbox import DryRunExecutor, DryRunInspector - from ...tests.clients import get_algod + from blackbox.blackbox import DryRunExecutor, DryRunInspector + from tests.clients import get_algod algod = get_algod() inputs = [(x,) for x in range(16)] @@ -134,8 +134,8 @@ def test_step6_and_7(): def test_step8(): - from ...blackbox.blackbox import DryRunExecutor - from ...tests.clients import get_algod + from blackbox.blackbox import DryRunExecutor + from tests.clients import get_algod algod = get_algod() inputs = [(x,) for x in range(101)] @@ -150,13 +150,13 @@ def test_step8(): def test_step9(): - from ...blackbox.blackbox import ( + from blackbox.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from ...blackbox.invariant import Invariant - from ...tests.clients import get_algod + from blackbox.invariant import Invariant + from tests.clients import get_algod algod = get_algod() @@ -184,13 +184,13 @@ def test_step9(): @pytest.mark.parametrize("exercise", ["A", "B"]) def test_exercises(exercise): - from ...blackbox.blackbox import ( + from blackbox.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from ...blackbox.invariant import Invariant - from ...tests.clients import get_algod + from blackbox.invariant import Invariant + from tests.clients import get_algod algod = get_algod() diff --git a/tests/integration/dryrun_mixin_docs_test.py b/tests/integration/dryrun_mixin_docs_test.py index 5027dee2..ddf16748 100644 --- a/tests/integration/dryrun_mixin_docs_test.py +++ b/tests/integration/dryrun_mixin_docs_test.py @@ -17,9 +17,9 @@ TealKeyValue, TealValue, ) -from ...blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper +from blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper -from ...tests.clients import get_algod +from tests.clients import get_algod def b64_encode_hack(s, b=None): From 9ca637eb67145d5ca480b46fc9b89bbc8a19a49a Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:13:49 -0400 Subject: [PATCH 50/85] try gin --- setup.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/setup.py b/setup.py index ab894800..e889a398 100644 --- a/setup.py +++ b/setup.py @@ -1,4 +1,4 @@ -from setuptools import setup +from setuptools import setup, find_packages setup( python_requires=">=3.8", @@ -8,14 +8,11 @@ description="TBD", author="TBD", url="https://github.com/algorand/graviton", - py_modules=["blackbox"], + # py_modules=["blackbox"], + packages=find_packages(), ) - - - - #!/usr/bin/env python3 # import setuptools From 9f21ca45f25d380bb22e4087a42dd887fe4177e9 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:22:22 -0400 Subject: [PATCH 51/85] maybe the init... --- blackbox/__init__.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/blackbox/__init__.py b/blackbox/__init__.py index e69de29b..7c073d25 100644 --- a/blackbox/__init__.py +++ b/blackbox/__init__.py @@ -0,0 +1,5 @@ +from . 
import blackbox +from . import dryrun +from . import invariant + +name = "blackbox" From 94e30638b7b454db4e239736acba277ef9a43106 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:26:33 -0400 Subject: [PATCH 52/85] try gingin --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index e889a398..a776d9db 100644 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ description="TBD", author="TBD", url="https://github.com/algorand/graviton", - # py_modules=["blackbox"], - packages=find_packages(), + py_modules=["blackbox"], + # packages=find_packages(), ) From 16896f2d132e4d82f9822b3466a4846b76e1871e Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:33:03 -0400 Subject: [PATCH 53/85] try gin --- setup.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index a776d9db..56801985 100644 --- a/setup.py +++ b/setup.py @@ -8,8 +8,8 @@ description="TBD", author="TBD", url="https://github.com/algorand/graviton", - py_modules=["blackbox"], - # packages=find_packages(), + # py_modules=["blackbox"], + packages=find_packages("blackbox/"), ) From 35a11ba922c54dbfd090a77f167b340feadbe84a Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:39:50 -0400 Subject: [PATCH 54/85] try gin --- blackbox/__init__.py | 5 ----- pyproject.toml | 4 ++++ setup.cfg | 24 ++++++++++++++++++++++++ setup.py | 42 ------------------------------------------ 4 files changed, 28 insertions(+), 47 deletions(-) create mode 100644 setup.cfg delete mode 100644 setup.py diff --git a/blackbox/__init__.py b/blackbox/__init__.py index 7c073d25..e69de29b 100644 --- a/blackbox/__init__.py +++ b/blackbox/__init__.py @@ -1,5 +0,0 @@ -from . import blackbox -from . import dryrun -from . import invariant - -name = "blackbox" diff --git a/pyproject.toml b/pyproject.toml index eaf69e59..faf95a63 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,7 @@ +[build-system] +requires = ["setuptools>=42"] +build-backend = "setuptools.build_meta" + # Maybe this is the culprit for why I can't get a proper # build in dependant repos? 
# [build-system] diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..1305a2de --- /dev/null +++ b/setup.cfg @@ -0,0 +1,24 @@ +[metadata] +name = graviton +version = 0.0.1 +author = Example Author +author_email = author@example.com +description = A small example package +long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/algorand/graviton +project_urls = + Bug Tracker = https://github.com/algorand/graviton/issues +classifiers = + Programming Language :: Python :: 3 + License :: OSI Approved :: MIT License + Operating System :: OS Independent + +[options] +package_dir = + = blackbox +packages = find: +python_requires = >=3.8 + +[options.packages.find] +where = blackbox \ No newline at end of file diff --git a/setup.py b/setup.py deleted file mode 100644 index 56801985..00000000 --- a/setup.py +++ /dev/null @@ -1,42 +0,0 @@ -from setuptools import setup, find_packages - -setup( - python_requires=">=3.8", - install_requires=["py-algorand-sdk", "tabulate==0.8.9"], - name="graviton", - version="0.0.1", - description="TBD", - author="TBD", - url="https://github.com/algorand/graviton", - # py_modules=["blackbox"], - packages=find_packages("blackbox/"), -) - - -#!/usr/bin/env python3 - -# import setuptools - -# with open("README.md", "r") as fh: -# long_description = fh.read() - -# setuptools.setup( -# name="graviton", -# version="0.0.1", -# author="Algorand", -# author_email="pypiservice@algorand.com", -# description="verify your TEAL program by experiment and observation", -# long_description=long_description, -# long_description_content_type="text/markdown", -# url="https://github.com/algorand/graviton", -# packages=setuptools.find_packages(), -# # package_dir={"": "."}, -# install_requires=["py-algorand-sdk", "tabulate==0.8.9"], -# classifiers=[ -# "Programming Language :: Python :: 3", -# "License :: OSI Approved :: MIT License", -# "Operating System :: OS Independent", -# ], -# package_data={"pyteal": ["*.pyi"]}, -# python_requires=">=3.8", -# ) From 827ebbd0328b35f26c1a6b24c9270ed6a87e42ee Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:42:23 -0400 Subject: [PATCH 55/85] try gin --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index 1305a2de..ccedd456 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,9 +16,9 @@ classifiers = [options] package_dir = - = blackbox + = . packages = find: python_requires = >=3.8 [options.packages.find] -where = blackbox \ No newline at end of file +where = . \ No newline at end of file From f4570ebe811fe179cb431db2f2f791a98c397238 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Mon, 28 Mar 2022 23:54:29 -0400 Subject: [PATCH 56/85] try gin --- setup.cfg | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index ccedd456..1f744a6e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,8 +15,8 @@ classifiers = Operating System :: OS Independent [options] -package_dir = - = . 
+# package_dir = +# =blackbox packages = find: python_requires = >=3.8 From e36ad8f3d4788d4df4198766194e5d2f0c8788d0 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 16:42:45 -0400 Subject: [PATCH 57/85] try gin --- {blackbox => graviton}/__init__.py | 0 {blackbox => graviton}/blackbox.py | 2 +- {blackbox => graviton}/dryrun.py | 0 {blackbox => graviton}/invariant.py | 2 +- setup.py | 52 +++++++++++++++++++++ tests/integration/blackbox_test.py | 4 +- tests/integration/dryrun_mixin_docs_test.py | 2 +- 7 files changed, 57 insertions(+), 5 deletions(-) rename {blackbox => graviton}/__init__.py (100%) rename {blackbox => graviton}/blackbox.py (99%) rename {blackbox => graviton}/dryrun.py (100%) rename {blackbox => graviton}/invariant.py (99%) create mode 100644 setup.py diff --git a/blackbox/__init__.py b/graviton/__init__.py similarity index 100% rename from blackbox/__init__.py rename to graviton/__init__.py diff --git a/blackbox/blackbox.py b/graviton/blackbox.py similarity index 99% rename from blackbox/blackbox.py rename to graviton/blackbox.py index 273be432..ab776c90 100644 --- a/blackbox/blackbox.py +++ b/graviton/blackbox.py @@ -8,7 +8,7 @@ from algosdk.v2client.algod import AlgodClient -from blackbox.dryrun import ( +from graviton.dryrun import ( ZERO_ADDRESS, assert_error, assert_no_error, diff --git a/blackbox/dryrun.py b/graviton/dryrun.py similarity index 100% rename from blackbox/dryrun.py rename to graviton/dryrun.py diff --git a/blackbox/invariant.py b/graviton/invariant.py similarity index 99% rename from blackbox/invariant.py rename to graviton/invariant.py index b49ac69d..47a1fac4 100644 --- a/blackbox/invariant.py +++ b/graviton/invariant.py @@ -1,7 +1,7 @@ from inspect import signature from typing import Any, Callable, Dict, List, Tuple, Union -from blackbox.blackbox import ( +from graviton.blackbox import ( DryRunInspector, DryRunProperty, ExecutionMode, diff --git a/setup.py b/setup.py new file mode 100644 index 00000000..c52cdd22 --- /dev/null +++ b/setup.py @@ -0,0 +1,52 @@ +from setuptools import setup, find_packages + +with open("README.md", "r") as fh: + long_description = fh.read() + +setup( + name="graviton", + version="0.0.1", + url="https://github.com/algorand/graviton", + description="verify your TEAL program by experiment and observation", + long_description=long_description, + author="Algorand", + author_email="pypiservice@algorand.com", + python_requires=">=3.10", + install_requires=["py-algorand-sdk", "tabulate==0.8.9"], + classifiers=[ + "Programming Language :: Python :: 3", + "License :: OSI Approved :: MIT License", + "Operating System :: OS Independent", + ], + # py_modules=["blackbox"], + packages=find_packages(), +) + + +#!/usr/bin/env python3 + +# import setuptools + +# with open("README.md", "r") as fh: +# long_description = fh.read() + +# setuptools.setup( +# name="graviton", +# version="0.0.1", +# author="Algorand", +# author_email="pypiservice@algorand.com", +# description="verify your TEAL program by experiment and observation", +# long_description=long_description, +# long_description_content_type="text/markdown", +# url="https://github.com/algorand/graviton", +# packages=setuptools.find_packages(), +# # package_dir={"": "."}, +# install_requires=["py-algorand-sdk", "tabulate==0.8.9"], +# classifiers=[ +# "Programming Language :: Python :: 3", +# "License :: OSI Approved :: MIT License", +# "Operating System :: OS Independent", +# ], +# package_data={"pyteal": ["*.pyi"]}, +# python_requires=">=3.8", +# ) diff --git 
a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index 11bece36..f69cfbff 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -2,7 +2,7 @@ import pytest -from blackbox.blackbox import ( +from graviton.blackbox import ( DryRunEncoder as Encoder, DryRunExecutor as Executor, DryRunProperty as DRProp, @@ -10,7 +10,7 @@ ExecutionMode, mode_has_property, ) -from blackbox.invariant import Invariant +from graviton.invariant import Invariant from tests.clients import get_algod diff --git a/tests/integration/dryrun_mixin_docs_test.py b/tests/integration/dryrun_mixin_docs_test.py index ddf16748..ba129890 100644 --- a/tests/integration/dryrun_mixin_docs_test.py +++ b/tests/integration/dryrun_mixin_docs_test.py @@ -17,7 +17,7 @@ TealKeyValue, TealValue, ) -from blackbox.dryrun import DryrunTestCaseMixin, DryRunHelper +from graviton.dryrun import DryrunTestCaseMixin, DryRunHelper from tests.clients import get_algod From 9796c8adf65a7f2bd17e75e66e824401ecfea974 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 17:06:12 -0400 Subject: [PATCH 58/85] remove setup.cfg --- setup.cfg | 24 --------------------- setup.py | 30 -------------------------- tests/integration/doc_examples_test.py | 16 +++++++------- 3 files changed, 8 insertions(+), 62 deletions(-) delete mode 100644 setup.cfg diff --git a/setup.cfg b/setup.cfg deleted file mode 100644 index 1f744a6e..00000000 --- a/setup.cfg +++ /dev/null @@ -1,24 +0,0 @@ -[metadata] -name = graviton -version = 0.0.1 -author = Example Author -author_email = author@example.com -description = A small example package -long_description = file: README.md -long_description_content_type = text/markdown -url = https://github.com/algorand/graviton -project_urls = - Bug Tracker = https://github.com/algorand/graviton/issues -classifiers = - Programming Language :: Python :: 3 - License :: OSI Approved :: MIT License - Operating System :: OS Independent - -[options] -# package_dir = -# =blackbox -packages = find: -python_requires = >=3.8 - -[options.packages.find] -where = . 
\ No newline at end of file diff --git a/setup.py b/setup.py index c52cdd22..c1be37c4 100644 --- a/setup.py +++ b/setup.py @@ -18,35 +18,5 @@ "License :: OSI Approved :: MIT License", "Operating System :: OS Independent", ], - # py_modules=["blackbox"], packages=find_packages(), ) - - -#!/usr/bin/env python3 - -# import setuptools - -# with open("README.md", "r") as fh: -# long_description = fh.read() - -# setuptools.setup( -# name="graviton", -# version="0.0.1", -# author="Algorand", -# author_email="pypiservice@algorand.com", -# description="verify your TEAL program by experiment and observation", -# long_description=long_description, -# long_description_content_type="text/markdown", -# url="https://github.com/algorand/graviton", -# packages=setuptools.find_packages(), -# # package_dir={"": "."}, -# install_requires=["py-algorand-sdk", "tabulate==0.8.9"], -# classifiers=[ -# "Programming Language :: Python :: 3", -# "License :: OSI Approved :: MIT License", -# "Operating System :: OS Independent", -# ], -# package_data={"pyteal": ["*.pyi"]}, -# python_requires=">=3.8", -# ) diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index e83e040b..f7f34702 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -18,7 +18,7 @@ def test_step4(): - from blackbox.blackbox import DryRunExecutor + from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -38,7 +38,7 @@ def test_step4(): def test_step5(): - from blackbox.blackbox import DryRunExecutor + from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -115,7 +115,7 @@ def remove_whitespace(s): def test_step6_and_7(): - from blackbox.blackbox import DryRunExecutor, DryRunInspector + from graviton.blackbox import DryRunExecutor, DryRunInspector from tests.clients import get_algod algod = get_algod() @@ -134,7 +134,7 @@ def test_step6_and_7(): def test_step8(): - from blackbox.blackbox import DryRunExecutor + from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -150,12 +150,12 @@ def test_step8(): def test_step9(): - from blackbox.blackbox import ( + from graviton.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from blackbox.invariant import Invariant + from graviton.invariant import Invariant from tests.clients import get_algod algod = get_algod() @@ -184,12 +184,12 @@ def test_step9(): @pytest.mark.parametrize("exercise", ["A", "B"]) def test_exercises(exercise): - from blackbox.blackbox import ( + from graviton.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) - from blackbox.invariant import Invariant + from graviton.invariant import Invariant from tests.clients import get_algod algod = get_algod() From 443307355a4568ab21a318243f6b4fdf577a0830 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 17:45:05 -0400 Subject: [PATCH 59/85] try to make do without requirements.txt --- Makefile | 5 ++++- pyproject.toml | 6 ------ requirements.txt | 2 -- setup.py | 1 + 4 files changed, 5 insertions(+), 9 deletions(-) delete mode 100644 requirements.txt diff --git a/Makefile b/Makefile index f5adca0d..d3d72ddb 100644 --- a/Makefile +++ b/Makefile @@ -1,9 +1,12 @@ ####### Universal ###### pip: - pip install -r requirements.txt pip install -e . 
+pip-test: pip + pip install -e.[test] + + unit-test: pytest -sv tests/unit diff --git a/pyproject.toml b/pyproject.toml index faf95a63..1d08fd34 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -2,12 +2,6 @@ requires = ["setuptools>=42"] build-backend = "setuptools.build_meta" -# Maybe this is the culprit for why I can't get a proper -# build in dependant repos? -# [build-system] -# requires = ["setuptools", "wheel"] -# build-backend = "setuptools.build_meta" - [metadata] name = "graviton" version = "0.0.1" diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 98d98a90..00000000 --- a/requirements.txt +++ /dev/null @@ -1,2 +0,0 @@ -. -pytest==7.1.1 diff --git a/setup.py b/setup.py index c1be37c4..26c0a809 100644 --- a/setup.py +++ b/setup.py @@ -13,6 +13,7 @@ author_email="pypiservice@algorand.com", python_requires=">=3.10", install_requires=["py-algorand-sdk", "tabulate==0.8.9"], + extras_require={"test": "pytest==7.1.1"}, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", From e2f004a0c19248cf631b24848c786f700c989af7 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 18:06:03 -0400 Subject: [PATCH 60/85] merge with head and get act to pass --- .github/workflows/build.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 34ff00a2..655f2e10 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -26,7 +26,7 @@ jobs: with: fetch-depth: 0 - name: Install pip dependencies - run: make pip-publish + run: make pip-test - name: build-and-test run: make unit-test run-integration-tests: @@ -75,4 +75,4 @@ jobs: - name: Setup integration test environment run: make pip-test unit-test - name: Run integration tests - run: make gh-blackbox \ No newline at end of file + run: make gh-blackbox From 43de4f6f9d3f659af68e0c51a6b78f204de31aea Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 18:35:31 -0400 Subject: [PATCH 61/85] fix import --- README.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/README.md b/README.md index 51642fa6..1696c2be 100644 --- a/README.md +++ b/README.md @@ -103,7 +103,7 @@ When executing a dry run using `DryRunExecutor` you'll get back `DryRunInspecto **STEP 4**. Back to our $`x^2`$ example, and assuming the `teal` variable is defined [as above](#teal). You can run the following: ```python -from blackbox.blackbox import DryRunExecutor +from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -129,7 +129,7 @@ Some available _assertable properties_ are: * `error()` * `max_stack_height()` -See the [DryRunInspector class comment](./blackbox/blackbox.py#L387) for more assertable properties and details. +See the [DryRunInspector class comment](./blackbox/graviton.py#L387) for more assertable properties and details. ### Printing out the TEAL Stack Trace for a Failing Assertion @@ -137,7 +137,7 @@ See the [DryRunInspector class comment](./blackbox/blackbox.py#L387) for more as a handy report in the case of a failing assertion. Let's intentionally break the test case above by claiming that $`x^2 = x^3`$ for $`x=2`$ and print out this _report_ when our silly assertion fails. 
```python -from blackbox.blackbox import DryRunExecutor +from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -227,7 +227,7 @@ Let's expand our investigation from a single dry-run to multiple runs or a **run **STEP 6**. Back to our $`x^2`$ example, here's how to generate a report with 1 row for each of the inputs `0, 1, ... , 15`: ```python -from blackbox.blackbox import DryRunExecutor, DryRunInspector +from graviton.blackbox import DryRunExecutor, DryRunInspector from tests.clients import get_algod algod = get_algod() @@ -238,7 +238,7 @@ print(csv) ``` Note that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). -At this point, you'll be able to look at your [dry run sequence results](./blackbox/blackbox.py#L752) and conduct some analysis. For the $`x^2`$ example, +At this point, you'll be able to look at your [dry run sequence results](./blackbox/graviton.py#L752) and conduct some analysis. For the $`x^2`$ example, after loading the CSV in Google sheets and reformating a bit it will look like: image @@ -277,7 +277,7 @@ execution independently, or use `DryRunExecutor`'s convenience methods `dryrun_a $`x \leq 100`$: ```python -from blackbox.blackbox import DryRunExecutor +from graviton.blackbox import DryRunExecutor from tests.clients import get_algod algod = get_algod() @@ -314,7 +314,7 @@ In the parlance of the TEAL Blackbox Toolkit, a set of such declarative assertio is called a **test scenario**. Scenarios are dict's containing two keys `inputs` and `invariants` and follow [certain conventions](./blackbox/invariant.py#L101). 
In particular: * **inputs** gives a list of tuples, each tuple representing the `args` to be fed into a single dry run execution -* **invariants** gives a dict that maps [DryRunProperty](./blackbox/blackbox.py#L25)'s to an invariant _predicate_ +* **invariants** gives a dict that maps [DryRunProperty](./blackbox/graviton.py#L25)'s to an invariant _predicate_ In English, letting $`x`$ be the input variable for our square function, the above **test scenario**: @@ -337,12 +337,12 @@ Declarative invariants make use of the following: To employ the declarative test scenario above write the following: ```python -from blackbox.blackbox import ( +from graviton.blackbox import ( DryRunExecutor, DryRunProperty as DRProp, ExecutionMode, ) -from blackbox.invariant import Invariant +from graviton.invariant import Invariant from tests.clients import get_algod algod = get_algod() From 72d14b8b01ddd48290313718f2a9b251ab312cd7 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 18:44:36 -0400 Subject: [PATCH 62/85] bring our changes to the top of the .gitignore --- .gitignore | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/.gitignore b/.gitignore index 4b5d70de..237c0d4a 100644 --- a/.gitignore +++ b/.gitignore @@ -13,7 +13,10 @@ sandbox # VS Code detritus .vscode -##### github recommends for Python ##### +# IDE +.idea + +##### BEGIN: github recommends for Python ##### # Byte-compiled / optimized / DLL files __pycache__/ @@ -145,5 +148,4 @@ dmypy.json # Pyre type checker .pyre/ -# IDE -.idea +##### END: github recommends for Python ##### From 621435c6da410604ca9f3618afb0ba0e58372d40 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:18:38 -0400 Subject: [PATCH 63/85] Update README.md --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 1696c2be..472c6733 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,9 @@ -# TEAL Blackbox Toolkit: Program Reporting and Testing via Dry Runs +# GRAVITON (aka the TEAL Blackbox Toolkit): Program Reporting and Testing via Dry Runs + +http://cds.cern.ch/record/2315186/files/scoap3-fulltext.pdf **NOTE: to get math formulas to render here using Chrome, add the [xhub extension](https://chrome.google.com/webstore/detail/xhub/anidddebgkllnnnnjfkmjcaallemhjee/related) and reload** From d64c640b60784ca8b209884122630486e958890f Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:22:48 -0400 Subject: [PATCH 64/85] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 472c6733..f403476f 100644 --- a/README.md +++ b/README.md @@ -70,7 +70,7 @@ Even better, before making fine-grained assertions we'd like to get a sense of w **STEP 3**. Next, you'll need to figure out if your TEAL program should be a Logic Signature or an Application. Each of these program _modes_ has its merits, but we won't get into the pros/cons here. From a Blackbox Test's perspective, the main difference is how external arguments are handled. Logic sigs rely on the [arg opcode](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#arg-n) while apps rely on [txna ApplicationArgs i](https://developer.algorand.org/docs/get-details/dapps/avm/teal/opcodes/#txna-f-i). In our $`x^2`$ **logic sig** example, you can see on [line 2](./tests/teal/lsig_square.teal#L2) that the `arg` opcode is used. 
Because each argument opcode (`arg` versus `ApplicationArgs`) is mode-exclusive, any program that takes input will execute succesfully in _one mode only_. -**STEP 4**. Write the TEAL program that you want to test. You can inline the test as described here or follow the approach of `./blackbox/blackbox_test.py` and save under `./blackbox/teal`. So following the inline +**STEP 4**. Write the TEAL program that you want to test. You can inline the test as described here or follow the approach of `./tests/integration/blackbox_test.py` and save under `./tests/teal`. So following the inline appraoch we begin our TEAL Blackbox script with an inline teal source variable: ```python From 94651abdab23ff852d43ac895d49635d71ae74e0 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:26:14 -0400 Subject: [PATCH 65/85] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f403476f..a89763c6 100644 --- a/README.md +++ b/README.md @@ -240,7 +240,7 @@ print(csv) ``` Note that each element in the `inputs` array `(x,)` is itself a tuple as `args` given to a dry run execution need to be of type `Sequence` (remember, that these will be passed to a TEAL program which may take one, several, or no inputs at all). -At this point, you'll be able to look at your [dry run sequence results](./blackbox/graviton.py#L752) and conduct some analysis. For the $`x^2`$ example, +At this point, you'll be able to look at your [dry run sequence results](./graviton/blackbox.py#L752) and conduct some analysis. For the $`x^2`$ example, after loading the CSV in Google sheets and reformating a bit it will look like: image From 8579ad9c31f1987fe761c09496a07edb687afd4c Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:35:04 -0400 Subject: [PATCH 66/85] Update Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index d3d72ddb..b9127820 100644 --- a/Makefile +++ b/Makefile @@ -50,4 +50,4 @@ gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test gh-blackbox: gh-blackbox-smoke integration-test -.PHONY: pip-publish pip-test unit-test gh-blackbox \ No newline at end of file +.PHONY: pip-publish pip-test unit-test gh-blackbox From af96c29456d45291611728288d95c60e8a987113 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:35:14 -0400 Subject: [PATCH 67/85] Update Makefile --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index b9127820..c18e2513 100644 --- a/Makefile +++ b/Makefile @@ -21,7 +21,7 @@ integration-test: pytest -sv tests/integration -###### Mac Only ###### +###### Local Only ###### # assumes installations of pipx, build and tox via: # `pip install pipx; pipx install build; pipx install tox` From b18f2c491bcb446e7ce6635986cb5ab59faf3d08 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:38:23 -0400 Subject: [PATCH 68/85] Update README.md --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a89763c6..6b5ab543 100644 --- a/README.md +++ b/README.md @@ -131,7 +131,7 @@ Some available _assertable properties_ are: * `error()` * `max_stack_height()` -See the [DryRunInspector class comment](./blackbox/graviton.py#L387) for more assertable properties and details. +See the [DryRunInspector class comment](./graviton/blackbox.py#L387) for more assertable properties and details. 
### Printing out the TEAL Stack Trace for a Failing Assertion @@ -313,10 +313,10 @@ scenario = { ``` In the parlance of the TEAL Blackbox Toolkit, a set of such declarative assertions -is called a **test scenario**. Scenarios are dict's containing two keys `inputs` and `invariants` and follow [certain conventions](./blackbox/invariant.py#L101). In particular: +is called a **test scenario**. Scenarios are dict's containing two keys `inputs` and `invariants` and follow [certain conventions](./graviton/invariant.py#L101). In particular: * **inputs** gives a list of tuples, each tuple representing the `args` to be fed into a single dry run execution -* **invariants** gives a dict that maps [DryRunProperty](./blackbox/graviton.py#L25)'s to an invariant _predicate_ +* **invariants** gives a dict that maps [DryRunProperty](./graviton/blackbox.py#L25)'s to an invariant _predicate_ In English, letting $`x`$ be the input variable for our square function, the above **test scenario**: From 948d438471e6942c4be7b20930367d62932de082 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:42:11 -0400 Subject: [PATCH 69/85] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 6b5ab543..d3532d59 100644 --- a/README.md +++ b/README.md @@ -249,7 +249,7 @@ Pointing out some interesting results: * column `D` **Arg 00** has the input $`x`$ (it's the argument at index 0) * column `A` contains the **Run** number -* column `E` **top of stack** does stores the program's termination, i.e. $`x^2`$ +* column `E` **top of stack** it the value at program's termination, i.e. $`x^2`$ * column `B` **Status** of each runs **PASS**es _except for **Run 1** with **Arg 00** = 0_. (The first run **REJECT**s because $`0^2 = 0`$ and TEAL programs reject when the top of the stack is 0) * column `G` shows scratch slot **s@000** which stores the value of $`x`$ (except for the case $`x = 0`$ in which appears empty; in fact, slots always default to the zero value and an **artifact** of dry-runs is that they do not report when 0-values get stored into previously empty slots as no state change actually occurs) * column `F` **max stack height** is always 2. The final observation makes sense because there is no branching or looping in the program. From 10539ac817f4e71adb9e6a191ff50d8a60896ca0 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:42:40 -0400 Subject: [PATCH 70/85] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d3532d59..23932245 100644 --- a/README.md +++ b/README.md @@ -249,7 +249,7 @@ Pointing out some interesting results: * column `D` **Arg 00** has the input $`x`$ (it's the argument at index 0) * column `A` contains the **Run** number -* column `E` **top of stack** it the value at program's termination, i.e. $`x^2`$ +* column `E` **top of stack** is the value at program's termination, i.e. $`x^2`$ * column `B` **Status** of each runs **PASS**es _except for **Run 1** with **Arg 00** = 0_. 
(The first run **REJECT**s because $`0^2 = 0`$ and TEAL programs reject when the top of the stack is 0) * column `G` shows scratch slot **s@000** which stores the value of $`x`$ (except for the case $`x = 0`$ in which appears empty; in fact, slots always default to the zero value and an **artifact** of dry-runs is that they do not report when 0-values get stored into previously empty slots as no state change actually occurs) * column `F` **max stack height** is always 2. The final observation makes sense because there is no branching or looping in the program. From 815ce5f3962715f6ce4598f0d443d9194625bcc4 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 19:46:57 -0400 Subject: [PATCH 71/85] i -> args --- README.md | 2 +- tests/integration/doc_examples_test.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 23932245..f4b4de51 100644 --- a/README.md +++ b/README.md @@ -354,7 +354,7 @@ scenario = { "invariants": { DRProp.stackTop: lambda args: args[0] ** 2, DRProp.maxStackHeight: 2, - DRProp.status: lambda i: "REJECT" if i[0] == 0 else "PASS", + DRProp.status: lambda args: "REJECT" if args[0] == 0 else "PASS", DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), }, } diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index f7f34702..bf328ede 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -165,7 +165,7 @@ def test_step9(): "invariants": { DRProp.stackTop: lambda args: args[0] ** 2, DRProp.maxStackHeight: 2, - DRProp.status: lambda i: "REJECT" if i[0] == 0 else "PASS", + DRProp.status: lambda args: "REJECT" if args[0] == 0 else "PASS", DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}), }, } From 29f2f7e608e7991e12a1555cb4a85968c8268020 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 22:51:25 -0400 Subject: [PATCH 72/85] per CR suggestions --- Makefile | 2 -- README.md | 4 ++-- graviton/invariant.py | 8 ++++---- tests/integration/blackbox_test.py | 24 ++++++++++++------------ tests/integration/doc_examples_test.py | 8 ++++---- 5 files changed, 22 insertions(+), 24 deletions(-) diff --git a/Makefile b/Makefile index c18e2513..099f005a 100644 --- a/Makefile +++ b/Makefile @@ -49,5 +49,3 @@ gh-sandbox-test: gh-blackbox-smoke: blackbox-smoke-prefix gh-sandbox-test gh-blackbox: gh-blackbox-smoke integration-test - -.PHONY: pip-publish pip-test unit-test gh-blackbox diff --git a/README.md b/README.md index f4b4de51..f8370eb3 100644 --- a/README.md +++ b/README.md @@ -368,8 +368,8 @@ inputs, invariants = Invariant.inputs_and_invariants( inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) # Invariant assertions on sequence: -for property, invariant in invariants.items(): - invariant.validates(property, inputs, inspectors) +for dr_property, invariant in invariants.items(): + invariant.validates(dr_property, inputs, inspectors) ``` **STEP 10**. 
_**Deep Dive into Invariants via Exercises**_ diff --git a/graviton/invariant.py b/graviton/invariant.py index 47a1fac4..cfdb5e48 100644 --- a/graviton/invariant.py +++ b/graviton/invariant.py @@ -41,7 +41,7 @@ def expected(self, args: list) -> Union[str, int]: def validates( self, - property: DryRunProperty, + dr_property: DryRunProperty, inputs: List[list], inspectors: List[DryRunInspector], ): @@ -51,12 +51,12 @@ def validates( ), f"inputs (len={N}) and dryrun responses (len={len(inspectors)}) must have the same length" assert isinstance( - property, DryRunProperty - ), f"invariants types must be DryRunProperty's but got [{property}] which is a {type(property)}" + dr_property, DryRunProperty + ), f"invariants types must be DryRunProperty's but got [{dr_property}] which is a {type(dr_property)}" for i, args in enumerate(inputs): res = inspectors[i] - actual = res.dig(property) + actual = res.dig(dr_property) ok, msg = self(args, actual) assert ok, res.report(args, msg, row=i + 1) diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index f69cfbff..eeae9f69 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -382,17 +382,17 @@ def test_app_with_report(filebase: str): # 4. Sequential invariants (if provided any) for i, type_n_invariant in enumerate(invariants.items()): - property, invariant = type_n_invariant + dr_property, invariant = type_n_invariant assert mode_has_property( - mode, property - ), f"assert_type {property} is not applicable for {mode}. Please REMOVE or MODIFY" + mode, dr_property + ), f"assert_type {dr_property} is not applicable for {mode}. Please REMOVE or MODIFY" - invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{property}") + invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{dr_property}") print( - f"{i+1}. Semantic invariant for {case_name}-{mode}: {property} <<{invariant}>>" + f"{i+1}. Semantic invariant for {case_name}-{mode}: {dr_property} <<{invariant}>>" ) - invariant.validates(property, inputs, dryrun_results) + invariant.validates(dr_property, inputs, dryrun_results) # NOTE: logic sig dry runs are missing some information when compared with app dry runs. @@ -574,14 +574,14 @@ def test_logicsig_with_report(filebase: str): # 4. Sequential invariants (if provided any) for i, type_n_invariant in enumerate(invariants.items()): - property, invariant = type_n_invariant + dr_property, invariant = type_n_invariant assert mode_has_property( - mode, property - ), f"assert_type {property} is not applicable for {mode}. Please REMOVE of MODIFY" + mode, dr_property + ), f"assert_type {dr_property} is not applicable for {mode}. Please REMOVE of MODIFY" - invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{property}") + invariant = Invariant(invariant, name=f"{case_name}[{i}]@{mode}-{dr_property}") print( - f"{i+1}. Semantic invariant for {case_name}-{mode}: {property} <<{invariant}>>" + f"{i+1}. 
Semantic invariant for {case_name}-{mode}: {dr_property} <<{invariant}>>" ) - invariant.validates(property, inputs, dryrun_results) + invariant.validates(dr_property, inputs, dryrun_results) diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index bf328ede..3ccda6f9 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -178,8 +178,8 @@ def test_step9(): inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) # Invariant assertions on sequence: - for property, invariant in invariants.items(): - invariant.validates(property, inputs, inspectors) + for dr_property, invariant in invariants.items(): + invariant.validates(dr_property, inputs, inspectors) @pytest.mark.parametrize("exercise", ["A", "B"]) @@ -217,5 +217,5 @@ def test_exercises(exercise): inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs) # Invariant assertions on sequence: - for property, invariant in invariants.items(): - invariant.validates(property, inputs, inspectors) + for dr_property, invariant in invariants.items(): + invariant.validates(dr_property, inputs, inspectors) From ef4987c9243b2a21fa05e08c036ceb7ba34f3301 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 23:04:33 -0400 Subject: [PATCH 73/85] black --- Makefile | 4 ++++ setup.py | 2 +- tests/integration/blackbox_test.py | 24 ++++++++++++------------ tests/integration/doc_examples_test.py | 8 ++++---- tests/unit/sanity_test.py | 1 - 5 files changed, 21 insertions(+), 18 deletions(-) diff --git a/Makefile b/Makefile index 099f005a..919d781e 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,15 @@ ####### Universal ###### pip: + pip install --upgrade pip pip install -e . pip-test: pip pip install -e.[test] +black: + black . 
+ unit-test: pytest -sv tests/unit diff --git a/setup.py b/setup.py index 26c0a809..aed4691a 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ author_email="pypiservice@algorand.com", python_requires=">=3.10", install_requires=["py-algorand-sdk", "tabulate==0.8.9"], - extras_require={"test": "pytest==7.1.1"}, + extras_require={"test": ["pytest==7.1.1", "black==22.3.0", "flake8==4.0.1"]}, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index eeae9f69..3710eb0a 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -122,19 +122,19 @@ def prop_assert(dr_resp, actual, expected): prop_assert(lsig_res, lsig_res.cost(), None) prop_assert(app_res, app_res.last_log(), None) - prop_assert(app_log_res, app_log_res.last_log(), (x**2).to_bytes(8, "big").hex()) - prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x**2)) + prop_assert(app_log_res, app_log_res.last_log(), (x ** 2).to_bytes(8, "big").hex()) + prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x ** 2)) prop_assert(lsig_res, lsig_res.last_log(), None) prop_assert(app_res, app_res.final_scratch(), {0: x}) - prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x**2}) + prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x ** 2}) prop_assert(lsig_res, lsig_res.final_scratch(), {0: x}) - prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x**2}) + prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x ** 2}) - prop_assert(app_res, app_res.stack_top(), x**2) - prop_assert(app_log_res, app_log_res.stack_top(), x**2) - prop_assert(lsig_res, lsig_res.stack_top(), x**2) - prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x**2)) + prop_assert(app_res, app_res.stack_top(), x ** 2) + prop_assert(app_log_res, app_log_res.stack_top(), x ** 2) + prop_assert(lsig_res, lsig_res.stack_top(), x ** 2) + prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x ** 2)) prop_assert(app_res, app_res.max_stack_height(), 2) prop_assert(app_log_res, app_log_res.max_stack_height(), 2) @@ -181,11 +181,11 @@ def prop_assert(dr_resp, actual, expected): # since only a single input, just assert a constant in each case "invariants": { DRProp.cost: 11, - DRProp.lastLog: Encoder.hex(2**10), + DRProp.lastLog: Encoder.hex(2 ** 10), # dicts have a special meaning as invariants. 
So in the case of "finalScratch" # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around - DRProp.finalScratch: lambda _: {0: 2**10}, - DRProp.stackTop: 2**10, + DRProp.finalScratch: lambda _: {0: 2 ** 10}, + DRProp.stackTop: 2 ** 10, DRProp.maxStackHeight: 2, DRProp.status: "PASS", DRProp.passed: True, @@ -404,7 +404,7 @@ def test_app_with_report(filebase: str): # DRA.cost: 11, # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), DRProp.finalScratch: lambda _: {}, - DRProp.stackTop: 2**10, + DRProp.stackTop: 2 ** 10, DRProp.maxStackHeight: 2, DRProp.status: "PASS", DRProp.passed: True, diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 3ccda6f9..020f1783 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -26,7 +26,7 @@ def test_step4(): args = (x,) inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) assert inspector.status() == "PASS" - assert inspector.stack_top() == x**2 + assert inspector.stack_top() == x ** 2 print(inspector.stack_top()) print(inspector.last_log()) @@ -53,7 +53,7 @@ def test_step5(): ) # This one's absurd! x^3 != x^2 - expected, actual = x**3, inspector.stack_top() + expected, actual = x ** 3, inspector.stack_top() # wrap for test purposes only with pytest.raises(AssertionError) as ae: @@ -127,7 +127,7 @@ def test_step6_and_7(): for i, inspector in enumerate(run_results): args = inputs[i] x = args[0] - inspector.stack_top() == x**2 + inspector.stack_top() == x ** 2 inspector.max_stack_height() == 2 inspector.status() == ("REJECT" if x == 0 else "PASS") inspector.final_scratch() == ({} if x == 0 else {0: x}) @@ -143,7 +143,7 @@ def test_step8(): for i, inspector in enumerate(dryrun_results): args = inputs[i] x = args[0] - assert inspector.stack_top() == x**2 + assert inspector.stack_top() == x ** 2 assert inspector.max_stack_height() == 2 assert inspector.status() == ("REJECT" if x == 0 else "PASS") assert inspector.final_scratch() == ({} if x == 0 else {0: x}) diff --git a/tests/unit/sanity_test.py b/tests/unit/sanity_test.py index 697d490f..f4455d4e 100644 --- a/tests/unit/sanity_test.py +++ b/tests/unit/sanity_test.py @@ -1,3 +1,2 @@ def test_noop(): assert True, "oh no" - From cf99b0e789f106e01c8f17106c527ae919d4b856 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Tue, 29 Mar 2022 23:45:39 -0400 Subject: [PATCH 74/85] flake8 --- .github/workflows/build.yml | 6 ++---- Makefile | 8 ++++++- graviton/blackbox.py | 30 +++++++++++++------------- graviton/dryrun.py | 5 ++--- graviton/invariant.py | 2 +- setup.cfg | 2 ++ tests/integration/blackbox_test.py | 24 ++++++++++----------- tests/integration/doc_examples_test.py | 8 +++---- 8 files changed, 45 insertions(+), 40 deletions(-) create mode 100644 setup.cfg diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 655f2e10..aa63c046 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -25,10 +25,8 @@ jobs: uses: actions/checkout@v2 with: fetch-depth: 0 - - name: Install pip dependencies - run: make pip-test - - name: build-and-test - run: make unit-test + - name: pip, lint, and units + run: make build-and-test run-integration-tests: runs-on: ubuntu-20.04 steps: diff --git a/Makefile b/Makefile index 919d781e..f4d17482 100644 --- a/Makefile +++ b/Makefile @@ -7,13 +7,19 @@ pip: pip-test: pip pip install -e.[test] +flake8: + flake8 graviton tests + black: - black . + black --check . 
+lint: black flake8 unit-test: pytest -sv tests/unit +build-and-test: pip-test lint unit-test + blackbox-smoke-prefix: echo "hello blackbox!" pwd diff --git a/graviton/blackbox.py b/graviton/blackbox.py index ab776c90..1016e177 100644 --- a/graviton/blackbox.py +++ b/graviton/blackbox.py @@ -75,7 +75,7 @@ def __str__(self) -> str: if self.hide_empty and self.is_empty(): return "" - assert self.is_b is not None, f"can't handle StackVariable with empty type" + assert self.is_b is not None, "can't handle StackVariable with empty type" return f"0x{b64decode(self.b).hex()}" if self.is_b else str(self.i) def as_python_type(self) -> Union[int, str, None]: @@ -456,28 +456,28 @@ def from_single_response(cls, dryrun_resp: dict) -> "DryRunInspector": return cls(dryrun_resp, 0) - def dig(self, property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: + def dig(self, dr_property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: """Main router for assertable properties""" txn = self.txn bbr = self.black_box_results assert mode_has_property( - self.mode, property - ), f"{self.mode} cannot handle dig information from txn for assertion type {property}" + self.mode, dr_property + ), f"{self.mode} cannot handle dig information from txn for assertion type {dr_property}" - if property == DryRunProperty.cost: + if dr_property == DryRunProperty.cost: return txn["cost"] - if property == DryRunProperty.lastLog: + if dr_property == DryRunProperty.lastLog: last_log = txn.get("logs", [None])[-1] if last_log is None: return last_log return b64decode(last_log).hex() - if property == DryRunProperty.finalScratch: + if dr_property == DryRunProperty.finalScratch: return {k: v.as_python_type() for k, v in bbr.final_scratch_state.items()} - if property == DryRunProperty.stackTop: + if dr_property == DryRunProperty.stackTop: trace = self.extracts["trace"] stack = trace[-1]["stack"] if not stack: @@ -485,19 +485,19 @@ def dig(self, property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: tv = TealVal.from_scratch(stack[-1]) return tv.as_python_type() - if property == DryRunProperty.maxStackHeight: + if dr_property == DryRunProperty.maxStackHeight: return max(len(t["stack"]) for t in self.extracts["trace"]) - if property == DryRunProperty.status: + if dr_property == DryRunProperty.status: return self.extracts["status"] - if property == DryRunProperty.passed: + if dr_property == DryRunProperty.passed: return self.extracts["status"] == "PASS" - if property == DryRunProperty.rejected: + if dr_property == DryRunProperty.rejected: return self.extracts["status"] == "REJECT" - if property == DryRunProperty.error: + if dr_property == DryRunProperty.error: contains = kwargs.get("contains") ok, msg = assert_error( self.parent_dryrun_response, contains=contains, enforce=False @@ -505,12 +505,12 @@ def dig(self, property: DryRunProperty, **kwargs: Dict[str, Any]) -> Any: # when there WAS an error, we return its msg, else False return ok - if property == DryRunProperty.errorMessage: + if dr_property == DryRunProperty.errorMessage: _, msg = assert_no_error(self.parent_dryrun_response, enforce=False) # when there was no error, we return None, else return its msg return msg if msg else None - raise Exception(f"Unknown assert_type {property}") + raise Exception(f"Unknown assert_type {dr_property}") def cost(self) -> Optional[int]: """Assertable property for the total opcode cost that was used during dry run execution diff --git a/graviton/dryrun.py b/graviton/dryrun.py index 66a81265..73a0cef2 100644 --- a/graviton/dryrun.py +++ 
b/graviton/dryrun.py @@ -1,8 +1,7 @@ import base64 import binascii -import string from dataclasses import dataclass -import pytest +import string from typing import List, Union from algosdk.constants import payment_txn, appcall_txn @@ -43,7 +42,7 @@ class App: global_state: List[TealKeyValue] = None -#### LIGHTWEIGHT ASSERTIONS FOR RE-USE #### +# ### LIGHTWEIGHT ASSERTIONS FOR RE-USE ### # def _msg_if(msg): return "" if msg is None else f": {msg}" diff --git a/graviton/invariant.py b/graviton/invariant.py index cfdb5e48..07c43efd 100644 --- a/graviton/invariant.py +++ b/graviton/invariant.py @@ -144,7 +144,7 @@ def inputs_and_invariants( invariants = {} predicates = scenario.get("invariants", {}) if predicates: - assert isinstance(predicates, dict), f"invariants must be a dict" + assert isinstance(predicates, dict), "invariants must be a dict" for key, predicate in predicates.items(): assert isinstance(key, DryRunProperty) and mode_has_property( diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..9214cb9e --- /dev/null +++ b/setup.cfg @@ -0,0 +1,2 @@ +[flake8] +ignore = E501, W503 \ No newline at end of file diff --git a/tests/integration/blackbox_test.py b/tests/integration/blackbox_test.py index 3710eb0a..eeae9f69 100644 --- a/tests/integration/blackbox_test.py +++ b/tests/integration/blackbox_test.py @@ -122,19 +122,19 @@ def prop_assert(dr_resp, actual, expected): prop_assert(lsig_res, lsig_res.cost(), None) prop_assert(app_res, app_res.last_log(), None) - prop_assert(app_log_res, app_log_res.last_log(), (x ** 2).to_bytes(8, "big").hex()) - prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x ** 2)) + prop_assert(app_log_res, app_log_res.last_log(), (x**2).to_bytes(8, "big").hex()) + prop_assert(app_log_res, app_log_res.last_log(), Encoder.hex(x**2)) prop_assert(lsig_res, lsig_res.last_log(), None) prop_assert(app_res, app_res.final_scratch(), {0: x}) - prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x ** 2}) + prop_assert(app_log_res, app_log_res.final_scratch(), {0: x, 1: x**2}) prop_assert(lsig_res, lsig_res.final_scratch(), {0: x}) - prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x ** 2}) + prop_assert(bad_lsig_res, bad_lsig_res.final_scratch(), {0: x, 1: x**2}) - prop_assert(app_res, app_res.stack_top(), x ** 2) - prop_assert(app_log_res, app_log_res.stack_top(), x ** 2) - prop_assert(lsig_res, lsig_res.stack_top(), x ** 2) - prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x ** 2)) + prop_assert(app_res, app_res.stack_top(), x**2) + prop_assert(app_log_res, app_log_res.stack_top(), x**2) + prop_assert(lsig_res, lsig_res.stack_top(), x**2) + prop_assert(bad_lsig_res, bad_lsig_res.stack_top(), Encoder.hex0x(x**2)) prop_assert(app_res, app_res.max_stack_height(), 2) prop_assert(app_log_res, app_log_res.max_stack_height(), 2) @@ -181,11 +181,11 @@ def prop_assert(dr_resp, actual, expected): # since only a single input, just assert a constant in each case "invariants": { DRProp.cost: 11, - DRProp.lastLog: Encoder.hex(2 ** 10), + DRProp.lastLog: Encoder.hex(2**10), # dicts have a special meaning as invariants. 
So in the case of "finalScratch" # which is supposed to _ALSO_ output a dict, we need to use a lambda as a work-around - DRProp.finalScratch: lambda _: {0: 2 ** 10}, - DRProp.stackTop: 2 ** 10, + DRProp.finalScratch: lambda _: {0: 2**10}, + DRProp.stackTop: 2**10, DRProp.maxStackHeight: 2, DRProp.status: "PASS", DRProp.passed: True, @@ -404,7 +404,7 @@ def test_app_with_report(filebase: str): # DRA.cost: 11, # DRA.lastLog: lightly_encode_output(2 ** 10, logs=True), DRProp.finalScratch: lambda _: {}, - DRProp.stackTop: 2 ** 10, + DRProp.stackTop: 2**10, DRProp.maxStackHeight: 2, DRProp.status: "PASS", DRProp.passed: True, diff --git a/tests/integration/doc_examples_test.py b/tests/integration/doc_examples_test.py index 020f1783..3ccda6f9 100644 --- a/tests/integration/doc_examples_test.py +++ b/tests/integration/doc_examples_test.py @@ -26,7 +26,7 @@ def test_step4(): args = (x,) inspector = DryRunExecutor.dryrun_logicsig(algod, teal, args) assert inspector.status() == "PASS" - assert inspector.stack_top() == x ** 2 + assert inspector.stack_top() == x**2 print(inspector.stack_top()) print(inspector.last_log()) @@ -53,7 +53,7 @@ def test_step5(): ) # This one's absurd! x^3 != x^2 - expected, actual = x ** 3, inspector.stack_top() + expected, actual = x**3, inspector.stack_top() # wrap for test purposes only with pytest.raises(AssertionError) as ae: @@ -127,7 +127,7 @@ def test_step6_and_7(): for i, inspector in enumerate(run_results): args = inputs[i] x = args[0] - inspector.stack_top() == x ** 2 + inspector.stack_top() == x**2 inspector.max_stack_height() == 2 inspector.status() == ("REJECT" if x == 0 else "PASS") inspector.final_scratch() == ({} if x == 0 else {0: x}) @@ -143,7 +143,7 @@ def test_step8(): for i, inspector in enumerate(dryrun_results): args = inputs[i] x = args[0] - assert inspector.stack_top() == x ** 2 + assert inspector.stack_top() == x**2 assert inspector.max_stack_height() == 2 assert inspector.status() == ("REJECT" if x == 0 else "PASS") assert inspector.final_scratch() == ({} if x == 0 else {0: x}) From 4802591ed800e8cef268dac14b79bc8ea5fc8b15 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 00:12:26 -0400 Subject: [PATCH 75/85] Update setup.cfg --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index 9214cb9e..a6f727b8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,2 +1,2 @@ [flake8] -ignore = E501, W503 \ No newline at end of file +ignore = E501, W503 From 54c2b609ce0112e7238f9779fc05c3a039d07185 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 00:13:59 -0400 Subject: [PATCH 76/85] per CR suggestions --- .github/workflows/build.yml | 2 +- README.md | 2 ++ graviton/dryrun.py | 2 +- setup.py | 2 +- tests/clients.py | 8 -------- 5 files changed, 5 insertions(+), 11 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index aa63c046..0e45df2a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -18,7 +18,7 @@ jobs: container: python:${{ matrix.python }} strategy: matrix: - python: [ "3.10" ] + python: [ "3.8", "3.9", "3.10" ] steps: - run: python3 --version - name: Check out code diff --git a/README.md b/README.md index f8370eb3..30aee645 100644 --- a/README.md +++ b/README.md @@ -7,6 +7,8 @@ **NOTE: to get math formulas to render here using Chrome, add the [xhub extension](https://chrome.google.com/webstore/detail/xhub/anidddebgkllnnnnjfkmjcaallemhjee/related) and reload** +**DISCLAIMER**: Graviton is subject to change and 
makes no backwards compatability guarantees. + ## Blackbox Testing Howto ### What is TEAL Blackbox Testing? diff --git a/graviton/dryrun.py b/graviton/dryrun.py index 73a0cef2..7ce822f0 100644 --- a/graviton/dryrun.py +++ b/graviton/dryrun.py @@ -112,7 +112,6 @@ def assert_status(status, txn_index, msg, txns_res, enforce=True): def assert_error(drr, contains=None, txn_index=None, msg=None, enforce=True): error = DryRunHelper.find_error(drr, txn_index=txn_index) ok = bool(error) - result = None if not ok: # the expected error did NOT occur result = f"expected truthy error but got {error}" + _msg_if(msg) if enforce: @@ -190,6 +189,7 @@ def assert_local_state_contains(addr, delta_value, txn_index, txns_res, msg=None and txn_res["local-deltas"] is not None and len(txn_res["local-deltas"]) > 0 ): + addr_found = False for local_delta in txn_res["local-deltas"]: addr_found = False if local_delta["address"] == addr: diff --git a/setup.py b/setup.py index aed4691a..77fd7bbb 100644 --- a/setup.py +++ b/setup.py @@ -11,7 +11,7 @@ long_description=long_description, author="Algorand", author_email="pypiservice@algorand.com", - python_requires=">=3.10", + python_requires=">=3.8", install_requires=["py-algorand-sdk", "tabulate==0.8.9"], extras_require={"test": ["pytest==7.1.1", "black==22.3.0", "flake8==4.0.1"]}, classifiers=[ diff --git a/tests/clients.py b/tests/clients.py index 7b039143..012bb8bd 100644 --- a/tests/clients.py +++ b/tests/clients.py @@ -11,11 +11,3 @@ def get_algod() -> AlgodClient: return AlgodClient(DEVNET_TOKEN, f"http://localhost:{ALGOD_PORT}") - - -# def get_kmd() -> KMDClient: -# return KMDClient(DEVNET_TOKEN, f"http://localhost:{KMD_PORT}") - - -# def get_indexer() -> IndexerClient: -# return IndexerClient(DEVNET_TOKEN, f"http://localhost:{INDEXER_PORT}") From 73938804c61d5d640c7734b49bf54dfe2044e3a8 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 00:25:26 -0400 Subject: [PATCH 77/85] remove commented out code --- tests/clients.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/tests/clients.py b/tests/clients.py index 012bb8bd..2c7f220b 100644 --- a/tests/clients.py +++ b/tests/clients.py @@ -5,8 +5,6 @@ DEVNET_TOKEN = "a" * 64 ALGOD_PORT = 4001 -# KMD_PORT = 4002 -# INDEXER_PORT = 8980 def get_algod() -> AlgodClient: From be41d271f5d08b1132cd8ff9ecf7d2608ab88501 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 00:27:10 -0400 Subject: [PATCH 78/85] delete commented out code --- tests/clients.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/tests/clients.py b/tests/clients.py index 2c7f220b..e5a33c6f 100644 --- a/tests/clients.py +++ b/tests/clients.py @@ -1,8 +1,5 @@ from algosdk.v2client.algod import AlgodClient -# from algosdk.kmd import KMDClient -# from algosdk.v2client.indexer import IndexerClient - DEVNET_TOKEN = "a" * 64 ALGOD_PORT = 4001 From b0533b7678aca82dc471827e74f330bfb6968c98 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 14:05:07 -0400 Subject: [PATCH 79/85] changelog with versioning proposal --- CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) create mode 100644 CHANGELOG.md diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 00000000..10aa9d96 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,24 @@ +# Changelog + +## Versioning Legend + +Versioning is in Animal Emoji Lexicographical Order (AELO). For example: + +1. 🦙 (Alpaca) +2. 🐗 (Boar) +3. 🐈 (Cat) + +... etc ... 
+ +## Tagging Cheatsheet + +* create an annotated tag: + * `git tag -a 🦙 -m "productionize graviton" && git push origin 🦙` +* get tag details: + * `git show 🦙` + +## 🦙 (Alpaca) + +### Added + +* Basic functionality From 537767a6d6aa1290782068dc00960cf27240f6cf Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 14:12:27 -0400 Subject: [PATCH 80/85] sign the tag too! --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 10aa9d96..29ad3705 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -13,7 +13,7 @@ Versioning is in Animal Emoji Lexicographical Order (AELO). For example: ## Tagging Cheatsheet * create an annotated tag: - * `git tag -a 🦙 -m "productionize graviton" && git push origin 🦙` + * `git tag -as 🦙 -m "productionize graviton" && git push origin 🦙` * get tag details: * `git show 🦙` From 7a06838a52b35173744d0f605efbd4c583f85eff Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 15:17:13 -0400 Subject: [PATCH 81/85] try 3 python version of integration tests on github --- .github/workflows/build.yml | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0e45df2a..0690fc7e 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -29,6 +29,10 @@ jobs: run: make build-and-test run-integration-tests: runs-on: ubuntu-20.04 + # container: python:${{ matrix.python }} + strategy: + matrix: + python: [ "3.8", "3.9", "3.10" ] steps: - name: Check out code uses: actions/checkout@v2 @@ -36,11 +40,13 @@ jobs: fetch-depth: 0 - uses: actions/setup-python@v3 with: - python-version: "${{ env.PYTHON_VERSION }}" + python-version: "${{ matrix.python }}" + # python-version: "${{ env.PYTHON_VERSION }}" - name: Test Python version + # expected="${{ env.PYTHON_VERSION }}" run: | installed="$(python --version)" - expected="${{ env.PYTHON_VERSION }}" + expected="${{ matrix.python }}" echo $installed [[ $installed =~ "Python ${expected}" ]] && echo "Configured Python" || (echo "Failed to configure Python" && exit 1) - name: Install required os level applications From 1c110f3408759304f2931431102f1ee2010953c3 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 16:27:07 -0400 Subject: [PATCH 82/85] per CR suggestions and remove (confusing and unused) PYTHON_VERSION --- .github/workflows/build.yml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 0690fc7e..9281ae23 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -8,7 +8,6 @@ on: - main env: - PYTHON_VERSION: "3.10" # Duplicated in `build-test` due to Workflow limitations. 
SANDBOX_CONFIG: dev SANDBOX_GENESIS: genesis/dev/genesis.json @@ -29,7 +28,6 @@ jobs: run: make build-and-test run-integration-tests: runs-on: ubuntu-20.04 - # container: python:${{ matrix.python }} strategy: matrix: python: [ "3.8", "3.9", "3.10" ] @@ -41,9 +39,7 @@ jobs: - uses: actions/setup-python@v3 with: python-version: "${{ matrix.python }}" - # python-version: "${{ env.PYTHON_VERSION }}" - name: Test Python version - # expected="${{ env.PYTHON_VERSION }}" run: | installed="$(python --version)" expected="${{ matrix.python }}" From 4443feadaf79330e2976cca0febabd7ad6c54e3d Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 16:59:12 -0400 Subject: [PATCH 83/85] per CR suggestions --- setup.cfg => .flake8 | 0 Makefile | 5 ++--- setup.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) rename setup.cfg => .flake8 (100%) diff --git a/setup.cfg b/.flake8 similarity index 100% rename from setup.cfg rename to .flake8 diff --git a/Makefile b/Makefile index f4d17482..009010a1 100644 --- a/Makefile +++ b/Makefile @@ -1,11 +1,10 @@ ####### Universal ###### pip: - pip install --upgrade pip pip install -e . -pip-test: pip - pip install -e.[test] +pip-development: pip + pip install -e.[development] flake8: flake8 graviton tests diff --git a/setup.py b/setup.py index 77fd7bbb..b56da932 100644 --- a/setup.py +++ b/setup.py @@ -13,7 +13,7 @@ author_email="pypiservice@algorand.com", python_requires=">=3.8", install_requires=["py-algorand-sdk", "tabulate==0.8.9"], - extras_require={"test": ["pytest==7.1.1", "black==22.3.0", "flake8==4.0.1"]}, + extras_require={"development": ["pytest==7.1.1", "black==22.3.0", "flake8==4.0.1"]}, classifiers=[ "Programming Language :: Python :: 3", "License :: OSI Approved :: MIT License", From 0f49517600fd1fd3c9f08ff99899cc0b08dc4119 Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 17:02:06 -0400 Subject: [PATCH 84/85] yes, github actions depend on Makefile --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 9281ae23..8690ef80 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -73,6 +73,6 @@ jobs: with: config: ${{ env.SANDBOX_CONFIG }} - name: Setup integration test environment - run: make pip-test unit-test + run: make pip-development unit-test - name: Run integration tests run: make gh-blackbox From ba0315185956821e8448065fae784a5d2566985d Mon Sep 17 00:00:00 2001 From: Zeph Grunschlag Date: Wed, 30 Mar 2022 17:05:57 -0400 Subject: [PATCH 85/85] bad ref --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 009010a1..5b9bc379 100644 --- a/Makefile +++ b/Makefile @@ -17,7 +17,7 @@ lint: black flake8 unit-test: pytest -sv tests/unit -build-and-test: pip-test lint unit-test +build-and-test: pip-development lint unit-test blackbox-smoke-prefix: echo "hello blackbox!"
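
For orientation, here is how the API that these patches refactor and lint fits together end to end. This is a minimal sketch, not code from the patch series: the TEAL program, the input range, and the module paths are assumptions for illustration, while the call signatures (`dryrun_logicsig_on_sequence`, `Invariant(..., name=...)`, `validates(dr_property, inputs, inspectors)`) and the sandbox algod address mirror the diffs above.

```python
# Illustrative sketch only (not part of any patch above).
# Assumptions: graviton module paths, the TEAL source, and the input range;
# the sandbox algod settings mirror tests/clients.py.
from algosdk.v2client.algod import AlgodClient

from graviton.blackbox import DryRunExecutor, DryRunProperty as DRProp
from graviton.invariant import Invariant

# Local sandbox algod, as configured in tests/clients.py
algod = AlgodClient("a" * 64, "http://localhost:4001")

# Plausible stand-in for the README's squaring logic sig: stash arg 0 in
# scratch slot 0, then leave arg0 * arg0 on top of the stack.
teal = """#pragma version 6
arg 0
btoi
store 0
load 0
load 0
*"""

inputs = [(x,) for x in range(16)]  # the input range here is an arbitrary choice
inspectors = DryRunExecutor.dryrun_logicsig_on_sequence(algod, teal, inputs)

# Predicates keyed by dry-run property, as in the README scenario above:
# stack top is x**2, max stack height is 2, x == 0 REJECTs, and the final
# scratch is {0: x} except for x == 0 (the dry-run zero-value artifact).
predicates = {
    DRProp.stackTop: lambda args: args[0] ** 2,
    DRProp.maxStackHeight: 2,
    DRProp.status: lambda args: "REJECT" if args[0] == 0 else "PASS",
    DRProp.finalScratch: lambda args: ({0: args[0]} if args[0] else {}),
}

for i, (dr_property, predicate) in enumerate(predicates.items()):
    invariant = Invariant(predicate, name=f"square[{i}]@lsig-{dr_property}")
    invariant.validates(dr_property, inputs, inspectors)
```

Locally this flow corresponds to the Makefile targets introduced above (`make pip-development lint unit-test` for the unit side, and the sandbox-backed `make gh-blackbox` for integration), and patches 81-84 run it in CI under Python 3.8, 3.9, and 3.10.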