From 2efe049dfc1a7b2dfe58a9245947d0c3f4e2c240 Mon Sep 17 00:00:00 2001 From: perama-v <83961755+perama-v@users.noreply.github.com> Date: Mon, 13 Feb 2023 12:22:52 +1100 Subject: [PATCH] add mdbook --- .dockerignore | 1 + .github/workflows/book.yml | 70 +++++++++++ README.md | 4 +- book/.gitignore | 4 + book/README.md | 22 ++++ book/book.toml | 17 +++ book/src/SUMMARY.md | 64 ++++++++++ book/src/developers/README.md | 13 ++ book/src/developers/architecture/README.md | 8 ++ book/src/developers/architecture/database.md | 33 +++++ .../developers/architecture/process_flow.md | 109 ++++++++++++++++ book/src/developers/architecture/testing.md | 35 ++++++ .../src/developers/architecture/workspaces.md | 88 +++++++++++++ book/src/developers/contributing/README.md | 3 + book/src/developers/contributing/book.md | 104 +++++++++++++++ .../src/developers/contributing/git/README.md | 4 + .../contributing/git/code_review.md | 45 +++++++ .../developers/contributing/git/commits.md | 34 +++++ .../git/fetching_pull_requests.md | 36 ++++++ .../developers/contributing/git/merging.md | 5 + .../contributing/git/pull_requests.md | 29 +++++ .../developers/contributing/git/rebasing.md | 5 + .../contributing/git/release_notes.md | 3 + .../contributing/releases/README.md | 3 + .../contributing/releases/generation.md | 29 +++++ .../contributing/releases/release_notes.md | 14 +++ .../contributing/releases/versioning.md | 5 + .../developers/contributing/rust/README.md | 4 + .../developers/contributing/rust/comments.md | 4 + .../contributing/rust/error_handling.md | 9 ++ .../developers/contributing/rust/imports.md | 7 ++ .../developers/contributing/rust/logging.md | 15 +++ .../src/developers/contributing/rust/style.md | 10 ++ book/src/developers/contributing/tests.md | 7 ++ book/src/developers/core_concepts/README.md | 4 + .../developers/core_concepts/archive_nodes.md | 105 ++++++++++++++++ book/src/developers/core_concepts/bridge.md | 20 +++ .../src/developers/core_concepts/chain_tip.md | 13 ++ .../cryptographic_accumulator.md | 118 ++++++++++++++++++ .../developers/core_concepts/finding_peers.md | 110 ++++++++++++++++ book/src/developers/developer_stories.md | 43 +++++++ book/src/developers/goals.md | 24 ++++ book/src/developers/progress_status.md | 15 +++ book/src/developers/protocols/README.md | 7 ++ book/src/developers/protocols/discovery.md | 34 +++++ .../src/developers/protocols/json_rpc.md | 88 +++---------- book/src/developers/protocols/kademlia.md | 50 ++++++++ book/src/developers/protocols/portal_wire.md | 69 ++++++++++ book/src/developers/protocols/ssz.md | 66 ++++++++++ book/src/developers/protocols/utp.md | 25 ++++ .../src/developers/quick_setup.md | 104 ++------------- book/src/introduction/README.md | 31 +++++ book/src/introduction/portal_network.md | 79 ++++++++++++ book/src/users/README.md | 56 +++++++++ book/src/users/faq.md | 23 ++++ book/src/users/installation.md | 4 + .../src/users/installation/linux.md | 44 +++---- book/src/users/installation/mac_os.md | 9 ++ book/src/users/installation/raspberry_pi.md | 3 + book/src/users/installation/windows.md | 3 + book/src/users/monitoring.md | 39 ++++++ book/src/users/problems.md | 17 +++ book/src/users/requirements.md | 25 ++++ book/src/users/startup.md | 68 ++++++++++ book/src/users/use/README.md | 39 ++++++ book/src/users/use/ethereum_data.md | 20 +++ book/src/users/use/making_queries.md | 46 +++++++ book/src/users/use/portal_network_data.md | 37 ++++++ docs/contributing.md | 9 +- newsfragments/584.doc.md | 1 + trin-cli/README.md | 81 ++++++++++++ 71 files 
changed, 2177 insertions(+), 195 deletions(-) create mode 100644 .github/workflows/book.yml create mode 100644 book/.gitignore create mode 100644 book/README.md create mode 100644 book/book.toml create mode 100644 book/src/SUMMARY.md create mode 100644 book/src/developers/README.md create mode 100644 book/src/developers/architecture/README.md create mode 100644 book/src/developers/architecture/database.md create mode 100644 book/src/developers/architecture/process_flow.md create mode 100644 book/src/developers/architecture/testing.md create mode 100644 book/src/developers/architecture/workspaces.md create mode 100644 book/src/developers/contributing/README.md create mode 100644 book/src/developers/contributing/book.md create mode 100644 book/src/developers/contributing/git/README.md create mode 100644 book/src/developers/contributing/git/code_review.md create mode 100644 book/src/developers/contributing/git/commits.md create mode 100644 book/src/developers/contributing/git/fetching_pull_requests.md create mode 100644 book/src/developers/contributing/git/merging.md create mode 100644 book/src/developers/contributing/git/pull_requests.md create mode 100644 book/src/developers/contributing/git/rebasing.md create mode 100644 book/src/developers/contributing/git/release_notes.md create mode 100644 book/src/developers/contributing/releases/README.md create mode 100644 book/src/developers/contributing/releases/generation.md create mode 100644 book/src/developers/contributing/releases/release_notes.md create mode 100644 book/src/developers/contributing/releases/versioning.md create mode 100644 book/src/developers/contributing/rust/README.md create mode 100644 book/src/developers/contributing/rust/comments.md create mode 100644 book/src/developers/contributing/rust/error_handling.md create mode 100644 book/src/developers/contributing/rust/imports.md create mode 100644 book/src/developers/contributing/rust/logging.md create mode 100644 book/src/developers/contributing/rust/style.md create mode 100644 book/src/developers/contributing/tests.md create mode 100644 book/src/developers/core_concepts/README.md create mode 100644 book/src/developers/core_concepts/archive_nodes.md create mode 100644 book/src/developers/core_concepts/bridge.md create mode 100644 book/src/developers/core_concepts/chain_tip.md create mode 100644 book/src/developers/core_concepts/cryptographic_accumulator.md create mode 100644 book/src/developers/core_concepts/finding_peers.md create mode 100644 book/src/developers/developer_stories.md create mode 100644 book/src/developers/goals.md create mode 100644 book/src/developers/progress_status.md create mode 100644 book/src/developers/protocols/README.md create mode 100644 book/src/developers/protocols/discovery.md rename docs/jsonrpc_api.md => book/src/developers/protocols/json_rpc.md (57%) create mode 100644 book/src/developers/protocols/kademlia.md create mode 100644 book/src/developers/protocols/portal_wire.md create mode 100644 book/src/developers/protocols/ssz.md create mode 100644 book/src/developers/protocols/utp.md rename docs/getting_started.md => book/src/developers/quick_setup.md (52%) create mode 100644 book/src/introduction/README.md create mode 100644 book/src/introduction/portal_network.md create mode 100644 book/src/users/README.md create mode 100644 book/src/users/faq.md create mode 100644 book/src/users/installation.md rename docs/ubuntu_guide.md => book/src/users/installation/linux.md (97%) create mode 100644 book/src/users/installation/mac_os.md create mode 100644 
book/src/users/installation/raspberry_pi.md create mode 100644 book/src/users/installation/windows.md create mode 100644 book/src/users/monitoring.md create mode 100644 book/src/users/problems.md create mode 100644 book/src/users/requirements.md create mode 100644 book/src/users/startup.md create mode 100644 book/src/users/use/README.md create mode 100644 book/src/users/use/ethereum_data.md create mode 100644 book/src/users/use/making_queries.md create mode 100644 book/src/users/use/portal_network_data.md create mode 100644 newsfragments/584.doc.md diff --git a/.dockerignore b/.dockerignore index c656cb508..de85cd77e 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,3 +2,4 @@ .env venv target +/book diff --git a/.github/workflows/book.yml b/.github/workflows/book.yml new file mode 100644 index 000000000..0651fd91c --- /dev/null +++ b/.github/workflows/book.yml @@ -0,0 +1,70 @@ +name: book +on: + push: + branches: [main] + paths: + - 'book/**' + - 'book.toml' + pull_request: + branches: [main] + paths: + - 'book/**' + - 'book.toml' + +jobs: + test: + runs-on: ubuntu-latest + name: test + + steps: + - uses: actions/checkout@v3 + + - name: Install mdbook + run: | + mkdir mdbook + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook + echo `pwd`/mdbook >> $GITHUB_PATH + + - name: Run tests + run: mdbook test + + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Install mdbook + run: | + mkdir mdbook + curl -sSL https://github.com/rust-lang/mdBook/releases/download/v0.4.14/mdbook-v0.4.14-x86_64-unknown-linux-gnu.tar.gz | tar -xz --directory=./mdbook + echo `pwd`/mdbook >> $GITHUB_PATH + + - name: Build + run: mdbook build + + - name: Save pages artifact + uses: actions/upload-pages-artifact@v1 + with: + path: target/book + + deploy: + # Only deploy if a push to main + if: github.ref_name == 'main' && github.event_name == 'push' + runs-on: ubuntu-latest + needs: [test, build] + + # Grant GITHUB_TOKEN the permissions required to make a Pages deployment + permissions: + pages: write + id-token: write + + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v1 diff --git a/README.md b/README.md index 412fc4751..b48598084 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ Trin currently only runs on Unix-based platforms (Linux, macOS). We plan to even ## How to use Trin -Check out the [Getting Started](/docs/getting_started.md) guide to quickly get up and running with Trin. +Check out the [Trin book](https://ethereum.github.io/trin) to quickly get up and running with Trin. ## Experimental Status @@ -23,7 +23,7 @@ In this stage of development, Trin relies on a separate execution node (local no ## Want to help? Want to file a bug, contribute some code, or improve documentation? Excellent! Read up on our -guidelines for [contributing](/docs/contributing.md), +guidelines for contributing in the [Trin book](https://ethereum.github.io/trin), then check out issues that are labeled [Good First Issue](https://github.com/ethereum/trin/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22). 
diff --git a/book/.gitignore b/book/.gitignore new file mode 100644 index 000000000..5fc22d0c0 --- /dev/null +++ b/book/.gitignore @@ -0,0 +1,4 @@ +book +mermaid.min.js +mermaid-init.js + diff --git a/book/README.md b/book/README.md new file mode 100644 index 000000000..f69934777 --- /dev/null +++ b/book/README.md @@ -0,0 +1,22 @@ +## Using the book + +The book can be built and served locally. +```sh +cargo install mdbook +``` +Install support for `mermaid` diagrams: +```sh +cd book +cargo install mdbook-mermaid +mdbook-mermaid install +``` +This will create `mermaid.min.js` and `mermaid-init.js` files. + +Then run the book from the book crate: +```sh +mdbook serve --open +``` +Or the project root: +```sh +mdbook serve --open ./book +``` diff --git a/book/book.toml b/book/book.toml new file mode 100644 index 000000000..a19065751 --- /dev/null +++ b/book/book.toml @@ -0,0 +1,17 @@ +[book] +language = "en" +multilingual = false +src = "src" +title = "Trin" + +[output.html] +additional-js = ["mermaid.min.js", "mermaid-init.js"] + +[output.html.fold] +enable = true + +[preprocessor] + +[preprocessor.mermaid] +command = "mdbook-mermaid" + diff --git a/book/src/SUMMARY.md b/book/src/SUMMARY.md new file mode 100644 index 000000000..94956fc22 --- /dev/null +++ b/book/src/SUMMARY.md @@ -0,0 +1,64 @@ +# Summary + +- [Introduction](introduction/README.md) + - [Portal Network](introduction/portal_network.md) +- [Users](users/README.md) + - [Requirements](users/requirements.md) + - [Installation](users/installation.md) + - [Mac Os](users/installation/mac_os.md) + - [Linux](users/installation/linux.md) + - [Raspberry Pi](users/installation/raspberry_pi.md) + - [Windows](users/installation/windows.md) + - [Startup](users/startup.md) + - [Use](users/use/README.md) + - [Making queries](users/use/making_queries.md) + - [Ethereum data](users/use/ethereum_data.md) + - [Portal network data](users/use/portal_network_data.md) + - [Monitoring](users/monitoring.md) + - [Problems](users/problems.md) + - [FAQ](users/faq.md) +- [Developers](developers/README.md) + - [Quick setup](developers/quick_setup.md) + - [Developer stories](developers/developer_stories.md) + - [Goals](developers/goals.md) + - [Progress status](developers/progress_status.md) + - [Architecture](developers/architecture/README.md) + - [Workspaces](developers/architecture/workspaces.md) + - [Process flow](developers/architecture/process_flow.md) + - [Database](developers/architecture/database.md) + - [Testing](developers/architecture/testing.md) + - [Protocols](developers/protocols/README.md) + - [Portal wire protocol](developers/protocols/portal_wire.md) + - [Discovery](developers/protocols/discovery.md) + - [uTP](developers/protocols/utp.md) + - [JSON-RPC](developers/protocols/json_rpc.md) + - [SSZ](developers/protocols/ssz.md) + - [Kademlia](developers/protocols/kademlia.md) + - [Core concepts](developers/core_concepts/README.md) + - [Finding peers](developers/core_concepts/finding_peers.md) + - [Chain tip](developers/core_concepts/chain_tip.md) + - [Cryptographic accumulator](developers/core_concepts/cryptographic_accumulator.md) + - [Bridge](developers/core_concepts/bridge.md) + - [Archive nodes](developers/core_concepts/archive_nodes.md) + - [Contributor guidelines](developers/contributing/README.md) + - [Rust](developers/contributing/rust/README.md) + - [Comments](developers/contributing/rust/comments.md) + - [Imports](developers/contributing/rust/imports.md) + - [Logging](developers/contributing/rust/logging.md) + - [Error 
handling](developers/contributing/rust/error_handling.md) + - [Style](developers/contributing/rust/style.md) + - [Git](developers/contributing/git/README.md) + - [Commits](developers/contributing/git/commits.md) + - [Rebasing](developers/contributing/git/rebasing.md) + - [Release notes](developers/contributing/git/release_notes.md) + - [Pull requests](developers/contributing/git/pull_requests.md) + - [Code review](developers/contributing/git/code_review.md) + - [Fetching a pull request](developers/contributing/git/fetching_pull_requests.md) + - [Merging](developers/contributing/git/merging.md) + - [Releases](developers/contributing/releases/README.md) + - [Release notes](developers/contributing/releases/release_notes.md) + - [Versioning](developers/contributing/releases/versioning.md) + - [Generation](developers/contributing/releases/generation.md) + - [Tests](developers/contributing/tests.md) + - [Book](developers/contributing/book.md) + diff --git a/book/src/developers/README.md b/book/src/developers/README.md new file mode 100644 index 000000000..45ffd61f6 --- /dev/null +++ b/book/src/developers/README.md @@ -0,0 +1,13 @@ +# Developers + +This part of the book is for understanding Trin, and processes around +building Trin better. + +Where the Trin crates and the Portal Network specification are the source of +truth, this section seeks to offer a quicker "key concepts" for getting started. + +It seeks to answer questions like: +- What do I need to know about the Portal Network? +- How do the different components of Trin work together? +- What sort of data guarantees are made and how are they achieved? +- What things should a new contributor be mindful of? \ No newline at end of file diff --git a/book/src/developers/architecture/README.md b/book/src/developers/architecture/README.md new file mode 100644 index 000000000..888b853f0 --- /dev/null +++ b/book/src/developers/architecture/README.md @@ -0,0 +1,8 @@ +# Architecture + +Trin can be understood a from different perspectives. + +- How is code organised? +- How does data flow through trin? +- How is data stored? +- How does testing work? \ No newline at end of file diff --git a/book/src/developers/architecture/database.md b/book/src/developers/architecture/database.md new file mode 100644 index 000000000..5763a9e2b --- /dev/null +++ b/book/src/developers/architecture/database.md @@ -0,0 +1,33 @@ +# Database + +The database related code is located in `./trin-core/src/portalnet/storage.rs`. + +There are three main database kinds: + +|DB Name|Kind|Location|Purpose|Keys|Values| +|-|-|-|-|-|-| +|Main|RocksDB|Disk|Data store|Content ID|Content data bytes| +|Memory|HashMap|Memory|Kademlia cache|Content key|Content data bytes| +|Meta|SQLite|Disk|Manage DB size|Content ID|Content key, content size| + +## Main content database + +This is a persistent file-based database that uses RocksDB. +It is also called the "radius" database because content management rules are based on +the radius of content (specifically the content distance to the node ID). + +## Memory content database + +This uses is an in-memory hashmap to keep content that may not be required for long term +storage. An overlay service uses this database when receiving data from a peer as +part of Kademlia-related actions. If required, data is later moved to disk in the +main content database. + +## Meta database + +This is an SQLite database that stores metadata. For a piece of content, this includes +the content ID, content key and the size of the content. 
It makes assessing the size of +the main database quicker by avoiding the need to repeatedly compute the size of each piece of content. + +Database updates occur in tandem with the main database, where if an operation in one database +fails, the other can revert the operation to remain synced. diff --git a/book/src/developers/architecture/process_flow.md b/book/src/developers/architecture/process_flow.md new file mode 100644 index 000000000..947c7c6ef --- /dev/null +++ b/book/src/developers/architecture/process_flow.md @@ -0,0 +1,109 @@ +# Process flow + +The following main threads are spawned when Trin is started via `./src/main.rs`. + +```mermaid +stateDiagram-v2 + trin: trin + + state trin { + utplistener: UTP listener + subprotocolhandler: sub-protocol handler + subprotocolnetworktask: sub-protocol network task + portaleventshandler: portal events handler + jsonrpcserver: JSON-RPC server + + main() --> utplistener + main() --> subprotocolhandler + main() --> subprotocolnetworktask + main() --> portaleventshandler + main() --> jsonrpcserver + + } + +``` For each sub-protocol implemented (History, State, etc.), a new thread is started. + +Here are some of the major components of trin-core that are called on startup within `./trin-core/src/lib.rs`. + +```mermaid +stateDiagram-v2 + trincore: trin-core + collection: configs and services + + state trin { + main() --> from_cli() + from_cli() --> run_trin() + run_trin() --> discovery() + run_trin() --> utp_listener() + run_trin() --> header_oracle() + run_trin() --> portalnet_config + run_trin() --> storage_config + + } + + state trincore { + portalnet_config --> collection + storage_config --> collection + discovery() --> collection + header_oracle() --> collection + utp_listener() --> collection + + + state portalnet { + portalnet_config + storage_config + discovery() + } + state utp { + utp_listener() + } + state validation { + header_oracle() + } + } +``` + +Once the initial collection of important configs and services has +been aggregated, they are passed to the crates for each sub-protocol (`trin-history` shown here). The received data structures are then +used to start the JSON-RPC server. + +An events listener awaits network activity that can be actioned. +```mermaid +stateDiagram-v2 + trincore: trin-core + trinhistory: trin-history + jsonrpchistory: JSON-RPC History details + historyhandler: History handler + collection: configs and services + + state trin { + collection --> initialize_history_network() + collection --> HistoryRequestHandler + initialize_history_network() --> jsonrpchistory + jsonrpchistory --> launch_jsonrpc_server() + HistoryRequestHandler --> historyhandler + collection --> events() + historyhandler --> events() + } + + state trincore { + state portalnet { + events() + } + + } + state trinhistory { + initialize_history_network() + state jsonrpc { + HistoryRequestHandler + } + } + state rpc { + launch_jsonrpc_server() + } +``` + +Then `./trin-core/portalnet/events.rs` handles events at the level of the Portal Wire Protocol. +These are defined messages that are compliant with the Discv5 protocol, and specific +to the Portal Network. \ No newline at end of file diff --git a/book/src/developers/architecture/testing.md b/book/src/developers/architecture/testing.md new file mode 100644 index 000000000..756669484 --- /dev/null +++ b/book/src/developers/architecture/testing.md @@ -0,0 +1,35 @@ +# Testing + +Testing occurs at different levels of abstraction.
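The levels described below can be exercised locally with standard Cargo commands. As a rough sketch (CI may pass additional flags; the pull request guidelines elsewhere in this book use the same invocation):

```sh
# Run unit and integration tests for every crate in the workspace,
# treating warnings as errors as CI does.
RUSTFLAGS='-D warnings' cargo test --workspace
```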
+ +## Unit testing + +Unit tests are for checking individual data structures and methods. +These tests are included within each workspace, at the bottom of the file that contains the +code being tested. Tests are run by CI tasks on pull requests to the Trin repository. + +## Integration testing + +Tests that involve testing different parts of a crate at the same time are included in a `/tests` +directory within the relevant module or crate. They are also run by CI tasks on pull +requests to the Trin repository. + +## Network simulation + +The `test-utp` crate is part of continuous integration (CI). This sets up +client and server infrastructure on a single machine to test data streaming with +simulated packet loss. + +## Hive + +Hive testing runs Trin as a node and challenges it in a peer-to-peer environment. This +involves creating a Docker image with the Trin binary and passing it to Hive. + +Hive itself is a fork of the Ethereum `hive` testing framework and exists as `portal-hive`, an +external repository ([here](https://github.com/ogenev/portal-hive)). It can be started with Docker images of other clients for cross-client testing. +The nodes are started, fed a small amount of data and then challenged with RPC requests +related to that data. + +Testing is automated, using Docker configurations in the Trin repository to build and test Trin +and other clients at a regular cadence. Results of the latest test are displayed +at [https://portal-hive.ethdevops.io/](https://portal-hive.ethdevops.io/). diff --git a/book/src/developers/architecture/workspaces.md b/book/src/developers/architecture/workspaces.md new file mode 100644 index 000000000..1514ec070 --- /dev/null +++ b/book/src/developers/architecture/workspaces.md @@ -0,0 +1,88 @@ +# Workspaces + +Trin is a package that can be run: +```sh +cargo run -p trin +``` + +The trin repository is composed of workspaces that are used by the main Trin package. +Their relationship is outlined below. + +## `trin` + +Code for the `trin` package is located in `./src`. + +This crate is responsible for the overall operation of the Trin node. + +- Startup with different configurations via command line arguments +- Starting threads for different important functions such as uTP, Discovery & JSON-RPC. +- These threads perform tasks such as listening for peers or requests from a user. + +## `trin-core` + +This crate is responsible for the code that defines the main functions and data structures required for the operation of a Trin node. This includes code for: + +- Interacting with and managing peers +- Determining content to store and share +- Database management +- Ethereum-related data structures + +## `trin-history` + +This crate is responsible for the History sub-protocol. This means interacting with peers +to retrieve and distribute the following: +- Block headers +- Block bodies +- Block receipts + +Additionally, it is responsible for the header accumulator, a structure which provides a +mechanism to determine whether a given block hash is part of the canonical set of block hashes. + +The crate uses the `ethportal-api` crate to represent the main data type in this crate: the +`HistoryContentKey`. This struct implements the `OverlayContentKey` trait, which allows it to +be treated as a member of the broader family of `OverlayContentKey`s. + +## `trin-state` + +> This crate exists mostly as a stub for future work. + +This crate is equivalent in function to the `trin-history` crate, but instead is responsible +for the State sub-protocol.
+ +This means that it is responsible for: +- The state of all accounts. +- The state of all contracts. +- The bytecode of all contracts. + +Data in the state network is represented as tries (tree structures). The network uses proofs +against these tries to allow Trin nodes to verify the correctness of data. + +## `ethportal-api` + +This crate seeks to expose the data structures in the Portal Network specification. +This includes features such as derived SSZ encoding and convenience functions. + +The crate defines traits that may be used across different sub-protocols. For +example, the `OverlayContentKey` trait may be implemented for content on both the History and State +sub-protocols. Thus a function can accept content from both networks via `T: OverlayContentKey`. + +```rs,no_run +fn handles_content_keys<T: OverlayContentKey>(key: T) { + // Snip +} +``` +The crate will evolve to provide the types required for the other sub-protocols. + +## `rpc` + +This crate contains implementations of the `ethportal-api` jsonrpsee server API traits in Trin, and the interface for running the JSON-RPC server. + +## `utp-testing` + +Trin uses the Micro Transport Protocol (uTP), a UDP-based protocol similar to the BitTorrent protocol. +This crate can be used to set up clients and servers to test the protocol on a single machine. + +## `ethportal-peertest` (for deprecation) + +This crate is marked for deprecation and was previously used for automated peer testing in CI. +Now that a multi-client network exists, peer testing happens there. \ No newline at end of file diff --git a/book/src/developers/contributing/README.md b/book/src/developers/contributing/README.md new file mode 100644 index 000000000..e8f9d56a2 --- /dev/null +++ b/book/src/developers/contributing/README.md @@ -0,0 +1,3 @@ +# Contributor guidelines + +These guidelines are heavily influenced by the [Snake-Charmer Tactical Manual](https://github.com/ethereum/snake-charmers-tactical-manual). While the manual is written with a focus on Python projects, there is tons of relevant information in there for how to effectively contribute to open-source projects, and it's recommended that you look through the manual before contributing. diff --git a/book/src/developers/contributing/book.md b/book/src/developers/contributing/book.md new file mode 100644 index 000000000..2545481d2 --- /dev/null +++ b/book/src/developers/contributing/book.md @@ -0,0 +1,104 @@ +# Book + +## Using the book + +The book can be built and served locally. +```sh +cargo install mdbook +``` +Install support for `mermaid` diagrams: +```sh +cd book +cargo install mdbook-mermaid +mdbook-mermaid install +``` +This will create `mermaid.min.js` and `mermaid-init.js` files. + +Then run the book from the book crate: +```sh +mdbook serve --open +``` +Or the project root: +```sh +mdbook serve --open ./book +``` + +## Adding new pages + +Add a new entry to `./book/SUMMARY.md`. Follow the style there, which +uses strict formatting. There are two kinds of additions: + +- New single section + - Tab to the appropriate depth + - Add a `[Section name](section_name.md)` +- New nested section + - Tab to the appropriate depth + - Add a `[Section name](section_name/README.md)` + - Add `[Subsection one](section_name/subsection_one.md)` + - Add `[Subsection two](section_name/subsection_two.md)` + +Don't create these pages; the `./book/SUMMARY.md` file is parsed and any missing +pages are generated when `mdbook serve` is run. Content can then be added to the +generated pages.
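For example, a nested section entry in `./book/SUMMARY.md` looks like this (copied from the existing architecture chapter):

```md
  - [Architecture](developers/architecture/README.md)
    - [Workspaces](developers/architecture/workspaces.md)
    - [Database](developers/architecture/database.md)
```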
+ +Then run serve: +```sh +mdbook serve --open +``` + +## Test + +To test the code within the book run: +```sh +mdbook test +``` + +## Links + +To keep the book easy to manage, avoid: +- External links likely to change +- Internal links to other pages or sections + +Relative links to locations outside the `./book` directory are not possible. + +## Diagrams + +Diagrams can be added using mermaid annotations on a normal code block: + +```sh + ```mermaid + graph TD; + A-->B; + A-->C; + B-->D; + C-->D; + ``` +``` +The above be converted to the following during book-building: +```mermaid +graph TD; + A-->B; + A-->C; + B-->D; + C-->D; +``` + +### Installation + +Installation is required to enable diagram generation + +```sh +cd book +cargo install mdbook-mermaid +mdbook-mermaid install +``` + +## Crate documentation location + +Workspace crates are published to crates.io and include the `README.md` in the root of the crate. +This is valuable to have when using the crates outside of the context of Trin +E.g., `ethportal-api`. + +Any documentation of workspace crates in the book should therefore be limited to explaining +how the crate interacts with the other workspaces in the context of Trin. Rather than moving +the workspace `README.md`'s to the book. \ No newline at end of file diff --git a/book/src/developers/contributing/git/README.md b/book/src/developers/contributing/git/README.md new file mode 100644 index 000000000..3d05a8633 --- /dev/null +++ b/book/src/developers/contributing/git/README.md @@ -0,0 +1,4 @@ +# Git + +This section covers guidelines and common scenarios encountered with +using git and github for Trin development. \ No newline at end of file diff --git a/book/src/developers/contributing/git/code_review.md b/book/src/developers/contributing/git/code_review.md new file mode 100644 index 000000000..fe8deb627 --- /dev/null +++ b/book/src/developers/contributing/git/code_review.md @@ -0,0 +1,45 @@ +# Code review + +## Reviewing + +Every team member is responsible for reviewing code. The designations :speech_balloon:, :heavy_check_mark:, and :x: **should** be left by a reviewer as follows: + +- :speech_balloon: (Comment) should be used when there is not yet an opinion on overall validity of complete PR, for example: + - comments from a partial review + - comments from a complete review on a Work in Progress PR + - questions or non-specific concerns, where the answer might trigger an expected change before merging +- :heavy_check_mark: (Approve) should be used when the reviewer would consider it acceptable for the contributor to merge, *after addressing* all the comments. For example: + - style nitpick comments + - compliments or highlights of excellent patterns ("addressing" might be in the form of a reply that defines scenarios where the pattern could be used more in the code, or a simple :+1:) + - a specific concern, where multiple reasonable solutions can adequately resolve the concern + - a Work in Progress PR that is far enough along +- :x: (Request changes) should be used when the reviewer considers it unacceptable to merge without another review of changes that address the comments. 
For example: + - a specific concern, without a satisfactory solution in mind + - a specific concern with a satisfactory solution provided, but *alternative* solutions **may** be unacceptable + - any concern with significant subtleties + +## Responding + +Contributors **should** react to reviews as follows: +- :x: if *any* review is marked as "Request changes": + - make changes and/or request clarification + - **should not** merge until reviewer has reviewed again and changed the status +- (none) if there are no reviews, contributor should not merge. +- :speech_balloon: if *all* reviews are comments, then address the comments. Otherwise, treat as if no one has reviewed the PR. +- :heavy_check_mark: if *at least one* review is Approved, contributor **should** do these things before merging: + - make requested changes + - if any concern is unclear in any way, ask the reviewer for clarification before merging + - solve a concern with suggested, or alternative, solution + - if the reviewer's concern is clearly a misunderstanding, explain and merge. Contributor should be on the lookout for followup clarifications on the closed PR + - if the contributor simply disagrees with the concern, it would be best to communicate with the reviewer before merging + - if the PR is approved as a work-in-progress: consider reducing the scope of the PR to roughly the current state, and merging. (multiple smaller PRs is better than one big one) + +It is also recommended to use the emoji responses to signal agreement or that +you've seen a comment and will address it rather than replying. This reduces +github inbox spam. + +Everyone is free to review any pull request. + +Recommended Reading: + + - [How to Do Code Reviews Like a Human](https://mtlynch.io/human-code-reviews-1/) diff --git a/book/src/developers/contributing/git/commits.md b/book/src/developers/contributing/git/commits.md new file mode 100644 index 000000000..a5ce2019a --- /dev/null +++ b/book/src/developers/contributing/git/commits.md @@ -0,0 +1,34 @@ +# Commit messages + +## Commit Hygiene + +We do not have any stringent requirements on how you commit your work, however +you should work towards the following with your git habits. + +## Logical Commits + +This means that each commit contains one logical change to the code. For example: + +- commit `A` introduces new API +- commit `B` deprecates or removes the old API being replaced. +- commit `C` modifies the configuration for CI. + +This approach is sometimes easier to do *after* all of the code has been +written. Once things are complete, you can `git reset master` to unstage all +of the changes you've made, and then re-commit them in small chunks using `git +add -p`. + +## Commit Messages + +We don't care much about commit messages other than that they be sufficiently +descriptive of what is being done in the commit. + +The *correct* phrasing of a commit message. + +- `fix bug #1234` (correct) +- `fixes bug #1234` (wrong) +- `fixing bug #1234` (wrong) + +One way to test whether you have it right is to complete the following sentence. + +> If you apply this commit it will ________________. diff --git a/book/src/developers/contributing/git/fetching_pull_requests.md b/book/src/developers/contributing/git/fetching_pull_requests.md new file mode 100644 index 000000000..d0f7cd58c --- /dev/null +++ b/book/src/developers/contributing/git/fetching_pull_requests.md @@ -0,0 +1,36 @@ +# Fetching a pull request + +We often want or need to run code that someone proposes in a PR. 
Typically this involves adding the remote of the PR author locally and then fetching their branches. + +Example: + +```sh +git remote add someone https://github.com/someone/reponame.git +git fetch someone +git checkout someone/branch-name +``` + +With an increasing number of different contributors this workflow becomes tedious. +Luckily, there's a little trick that greatly improves the workflow as it lets us +pull down any PR without adding another remote. + +To do this, we just have to add the following line in the `[remote "origin"]` +section of the `.git/config` file in our local repository. + +```sh +fetch = +refs/pull/*/head:refs/remotes/origin/pr/* +``` + +Then, checking out a PR locally becomes as easy as: + +```sh +git fetch origin +git checkout origin/pr/ +``` + +>Replace `origin` ☝ with the actual name (e.g. `upstream`) that we use for the +remote that we want to fetch PRs from. + +Notice that fetching PRs this way is *read-only* which means that in case we do +want to contribute back to the PR (and the author has this enabled), we would +still need to add their remote explicitly. diff --git a/book/src/developers/contributing/git/merging.md b/book/src/developers/contributing/git/merging.md new file mode 100644 index 000000000..0c4fe867e --- /dev/null +++ b/book/src/developers/contributing/git/merging.md @@ -0,0 +1,5 @@ +# Merging + +Once your pull request has been *Approved* it may be merged at your discretion. In most cases responsibility for merging is left to the person who opened the pull request, however for simple pull requests it is fine for anyone to merge. + +If substantive changes are made **after** the pull request has been marked *Approved* you should ask for an additional round of review. diff --git a/book/src/developers/contributing/git/pull_requests.md b/book/src/developers/contributing/git/pull_requests.md new file mode 100644 index 000000000..fdc196687 --- /dev/null +++ b/book/src/developers/contributing/git/pull_requests.md @@ -0,0 +1,29 @@ +# Pull requests + + +We are a distributed team. The primary way we communicate about our code is +through github via pull requests. + +* When you start work on something you should have a pull request opened that + same day. +* Mark unfinished pull requests with the "Work in Progress" label. +* Before submitting a pr for review, you should run the following commands + locally and make sure they are passing, otherwise CI will raise an error. + * `cargo fmt --all -- --check` and `cargo clippy --all -- --deny warnings` for linting checks + * `RUSTFLAGS='-D warnings' cargo test --workspace` to run all tests + * Run the `ethportal-peertest` harness against a locally running node. Instructions + can be found in [README](../ethportal-peertest/README.md). +* Pull requests **should** always be reviewed by another member of the team + prior to being merged. + * Obvious exceptions include very small pull requests. + * Less obvious examples include things like time-sensitive fixes. +* You should not expect feedback on a pull request which is not passing CI. + * Obvious exceptions include soliciting high-level feedback on your approach. + + +Large pull requests (above 200-400 lines of code changed) cannot be effectively +reviewed. If your pull request exceeds this threshold you **should** make +every effort to divide it into smaller pieces. + +You as the person opening the pull request should assign a reviewer. 
+ diff --git a/book/src/developers/contributing/git/rebasing.md b/book/src/developers/contributing/git/rebasing.md new file mode 100644 index 000000000..993b06509 --- /dev/null +++ b/book/src/developers/contributing/git/rebasing.md @@ -0,0 +1,5 @@ +# Rebasing + +You should be using `git rebase` when there are *upstream* changes that you +need in your branch. You **should not** use `git merge` to pull in these +changes. diff --git a/book/src/developers/contributing/git/release_notes.md b/book/src/developers/contributing/git/release_notes.md new file mode 100644 index 000000000..5d8d27477 --- /dev/null +++ b/book/src/developers/contributing/git/release_notes.md @@ -0,0 +1,3 @@ +# Release notes + +Every pull request should include a Newsfragment markdown file to describe the contents of the pull request. These files are automatically formatted & collected upon each new release. The format for creating a Newsfragment file can be found in the [README](../newsfragments/README.md). diff --git a/book/src/developers/contributing/releases/README.md b/book/src/developers/contributing/releases/README.md new file mode 100644 index 000000000..2461bdcfe --- /dev/null +++ b/book/src/developers/contributing/releases/README.md @@ -0,0 +1,3 @@ +# Releases + +This section covers the process of making a Trin release. \ No newline at end of file diff --git a/book/src/developers/contributing/releases/generation.md b/book/src/developers/contributing/releases/generation.md new file mode 100644 index 000000000..9d7821215 --- /dev/null +++ b/book/src/developers/contributing/releases/generation.md @@ -0,0 +1,29 @@ +# Generation + +## Crate versions + +When cutting a new release, the versions of every crate in this repo should be updated simultaneously to the new version. + +## Generate the release + +**Prerequisite**: Make sure the central repository is configured as `origin`. + +Run `make release version=`. + +Example: + +```sh +make release version=0.2.0-alpha +``` + +### Update testnet nodes +Run `make create-docker-image` and `make push-docker-image` commands with the appropriate version. + +Example: + +```sh +make create-docker-image version=0.2.0-alpha +make push-docker-image version=0.2.0-alpha +``` + +Run the Ansible playbook to fetch the newly available docker image and update the testnet nodes. diff --git a/book/src/developers/contributing/releases/release_notes.md b/book/src/developers/contributing/releases/release_notes.md new file mode 100644 index 000000000..53d8dab2b --- /dev/null +++ b/book/src/developers/contributing/releases/release_notes.md @@ -0,0 +1,14 @@ +# Notes + +**Prerequisite**: Release notes are generated with [towncrier](https://pypi.org/project/towncrier/). Ensure to have `towncrier` installed and the command is available. + +Run `make notes version=` where `` is the version we are generating the release notes for e.g. `0.2.0-alpha`. + +Example: + +```sh +make notes version=0.2.0-alpha +``` + +Examine the generated release notes and if needed perform and commit any manual changes. +Generated notes are located in `/docs/release_notes.md`. diff --git a/book/src/developers/contributing/releases/versioning.md b/book/src/developers/contributing/releases/versioning.md new file mode 100644 index 000000000..f9a42a9bc --- /dev/null +++ b/book/src/developers/contributing/releases/versioning.md @@ -0,0 +1,5 @@ +# Versioning + +Make sure that version follows [semver](https://semver.org/) rules e.g (`0.2.0-alpha`). 
+ +**For the time being, ALWAYS specify the `-alpha` suffix.** diff --git a/book/src/developers/contributing/rust/README.md b/book/src/developers/contributing/rust/README.md new file mode 100644 index 000000000..963931567 --- /dev/null +++ b/book/src/developers/contributing/rust/README.md @@ -0,0 +1,4 @@ +# Rust + +Trin is written in Rust. This section includes guidelines for Rust-specific +patterns and principles. \ No newline at end of file diff --git a/book/src/developers/contributing/rust/comments.md b/book/src/developers/contributing/rust/comments.md new file mode 100644 index 000000000..835938195 --- /dev/null +++ b/book/src/developers/contributing/rust/comments.md @@ -0,0 +1,4 @@ +# Comments + +Any datatype of significance **should** have an accompanying comment briefly describing its role and responsibilities. Comments are an extremely valuable tool in open-source projects with many different contributors, and can greatly improve development speed. Explain your assumptions clearly so others don't need to dig through the code. +- Rust [doc comments](https://doc.rust-lang.org/rust-by-example/meta/doc.html) are the most best way to comment your code. diff --git a/book/src/developers/contributing/rust/error_handling.md b/book/src/developers/contributing/rust/error_handling.md new file mode 100644 index 000000000..c059e19a3 --- /dev/null +++ b/book/src/developers/contributing/rust/error_handling.md @@ -0,0 +1,9 @@ +# Error handling + +- Handle errors. Naked `.unwrap()`s aren't allowed, except for in unit tests. +Exceptions must be accompanied by a note justifying usage. + - In most cases where an exception can be made (E.g., parsing a static value) `.expect()` with a relevant message should be used over a naked unwrap. +- Write descriptive error messages that give context of the problem that occurred. Error messages should be unique, to aid with debugging. +- Meaningful error types should be used in place of `Result< _, String>`. + - General errors should use the [anyhow](https://docs.rs/anyhow/latest/anyhow/) crate. + - Custom / typed errors should derive from the `std::error::Error` trait. The [`thiserror`](https://docs.rs/thiserror/1.0.30/thiserror/) crate provides a useful macro to simplify creating custom error types. diff --git a/book/src/developers/contributing/rust/imports.md b/book/src/developers/contributing/rust/imports.md new file mode 100644 index 000000000..7e46025e2 --- /dev/null +++ b/book/src/developers/contributing/rust/imports.md @@ -0,0 +1,7 @@ +# Imports + +- In `*.rs` files, imports should be split into 3 groups [src](https://github.com/rust-dev-tools/fmt-rfcs/issues/131) and separated by a single line. Within a single group, imported items should be sorted alphabetically. + - Imports from `'std'` + - Imports from external crates + - Imports from within the same crate (`trin-core`, `trin-history`, `trin-state` inclusive). +- Alphabetize imports in `Cargo.toml` diff --git a/book/src/developers/contributing/rust/logging.md b/book/src/developers/contributing/rust/logging.md new file mode 100644 index 000000000..6a635d00e --- /dev/null +++ b/book/src/developers/contributing/rust/logging.md @@ -0,0 +1,15 @@ +# Logging + +- All logging should be done with the `log` library and not `println!()` statements. +- Appropriate log levels (`debug`, `warn`, `info`, etc.) should be used with respect to their content. +- Log statements should be declarative, useful, succinct and formatted for readability. 
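For instance, a call that produces output in the spirit of the "Good" example below might look like the following sketch (the function and field names are illustrative, not an excerpt from Trin):

```rust
use log::debug;

fn log_ping(enr_seq: u64, radius: u64) {
    // Summarise the relevant fields rather than dumping the whole struct.
    debug!("Received Ping(enr_seq={enr_seq}, radius={radius})");
}
```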
+ +Bad: +```sh +Oct 25 23:42:11.079 DEBUG trin_core::portalnet::events: Got discv5 event TalkRequest(TalkRequest { id: RequestId([226, 151, 109, 239, 115, 223, 116, 109]), node_address: NodeAddress { socket_addr: 127.0.0.1:4568, node_id: NodeId { raw: [5, 208, 240, 167, 153, 116, 216, 224, 160, 101, 80, 229, 154, 206, 113, 239, 182, 109, 181, 137, 16, 96, 251, 63, 85, 223, 235, 208, 3, 242, 175, 11] } }, protocol: [115, 116, 97, 116, 101], body: [1, 1, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 1, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0], sender: Some(UnboundedSender { chan: Tx { inner: Chan { tx: Tx { block_tail: 0x55c4fe611290, tail_position: 1 }, semaphore: 0, rx_waker: AtomicWaker, tx_count: 2, rx_fields: "..." } } }) }) +``` + +Good: +```sh +Oct 25 23:43:02.373 DEBUG trin_core::portalnet::overlay: Received Ping(enr_seq=1, radius=18446744073709551615) +``` diff --git a/book/src/developers/contributing/rust/style.md b/book/src/developers/contributing/rust/style.md new file mode 100644 index 000000000..a89b8eafb --- /dev/null +++ b/book/src/developers/contributing/rust/style.md @@ -0,0 +1,10 @@ +# Style + +## Clone + +Minimize the amount of `.clone()`s used. Cloning can be a useful mechanism, but should be used with discretion. When leaned upon excessively to [satisfy the borrow checker](https://rust-unofficial.github.io/patterns/anti_patterns/borrow_clone.html) it can lead to unintended consequences. + +## String interpolation + +Use interpolated string formatting when possible. +- Do `format!("words: {var:?}")` not `format!("words: {:?}", var)` \ No newline at end of file diff --git a/book/src/developers/contributing/tests.md b/book/src/developers/contributing/tests.md new file mode 100644 index 000000000..b8b4937f4 --- /dev/null +++ b/book/src/developers/contributing/tests.md @@ -0,0 +1,7 @@ +# Tests + +Testing is essential to the production of software with minimal flaws. The default should always be writing tests for the code you produce. + +Testing also introduces overhead into our workflow. If a test suite takes a long time to run, it slows down our iteration cycle. This means finding a pragmatic balance between thorough testing, and the speed of our test suite, as well as always iterating on our testing infrastructure. + +Unit test names should unambiguously identify the functionality being tested. Omit any "test" prefix from the name to avoid redundancy. diff --git a/book/src/developers/core_concepts/README.md b/book/src/developers/core_concepts/README.md new file mode 100644 index 000000000..33ad0607d --- /dev/null +++ b/book/src/developers/core_concepts/README.md @@ -0,0 +1,4 @@ +# Core concepts + +This section contains specific concepts that are common, important or +that have an interesting facet to understand. \ No newline at end of file diff --git a/book/src/developers/core_concepts/archive_nodes.md b/book/src/developers/core_concepts/archive_nodes.md new file mode 100644 index 000000000..21171c17b --- /dev/null +++ b/book/src/developers/core_concepts/archive_nodes.md @@ -0,0 +1,105 @@ +# Archive nodes + +A Portal Network node is not an archival node. This page explores the reason for this +and some considerations on the topic. + +An archive node is one that can know the history at a certain block in the past. + +A non-archive node has this information until a block is 128 blocks old. After this +point the data is forgotten. 
+ +## Old state +Archive nodes store old states: + +- What was the balance of token x at block y? +- What was in storage slot x at block y? + +## Old traces +Archive nodes store old traces. This means that they can re-execute old +transactions and show everything that the EVM did. + +- What events were emitted during transaction x? +- How much gas did transaction x use? + +## Requirements +Consider an archive node that is going to trace the 100th transaction in an old +block. + +- The transaction may call a contract, which may in turn call another contract (etc.). The state of the contracts must be known (balance, nonce, bytecode, storage) +- The transaction may reference the hash of a preceding block (up to a depth of 256 blocks) +- The transaction may modify state that has already been modified in the preceding 99 +transactions. + +## Would an Archive sub-protocol be of use? + +### Not for sequential data analysis +Archival nodes are great for data science because they allow traversing a large number +of sequential blocks and tracking changes over time. + +A portal node would not be suited for this activity because it requires sequential blocks +rather than possession of data based on the node's ID. Hence a Portal Node has a dispersed subset of +content and would need to ask peers for data for sequential blocks. Asking for all sequential +blocks would cause an infeasible burden on peers. + +### Possibly for personal wallet history + +A user with access to an index of address appearances (such as the Unchained Index) +could make queries about their historical transactions. This could be for a wallet, +multisig contract or any contract. + +After retrieving the traces for these transactions, they could be used to create a +display of activity. E.g., a graph of token balances changing over time, or a log +of on-chain activity (trades, loans, transfers, NFT activity). + +## Could an Archive sub-protocol exist? + +It is not impossible. However, the goal of the Portal Network is to provide the +function of a non-tracing node. Some considerations are explored below. + +### Intra-block state + +To trace the last transaction in a block, all preceding transaction final states +must be known. Hence a single request for a transaction trace could result in requiring +many transactions in a single block to be obtained. This applies to popular contracts +that appear frequently in a block (e.g., exchanges and popular tokens). + +Consequences of a request for a transaction at the end of a block involving popular contracts: +- It would be very slow to get a response +- It could be used as a denial of service (DoS) attack on the network. For instance, +by finding the final transactions in blocks and requesting them from different nodes. + +### Nested contract calls + +A contract could start a chain of nested calls to other contracts. If a node +does not have the state of these contracts, it would have to request them. +Hence, the time to trace such a transaction would be very slow. Every nested +call would take the time that a single Portal Network request takes. + +Consequences of a request for a transaction with deeply nested contract calls: +- It would be very slow to get a response +- It could be used as a denial of service (DoS) attack on the network. For instance, +by finding many nested transactions and requesting them from different nodes. + +### Duplication of data + +If Archive were a sub-protocol there may be some data that is required to be duplicated +on the History or State sub-protocols.
This implies that the sub-protocol would be inefficient +with respect to disk space, but that may not be a significant problem. + +### Medium-sized portal nodes + +There is always a moderate amount of interest in archive nodes, as many parties +find historical Ethereum data valuable. As archive nodes require a minimum of ~2TB +of storage, many people choose not to run one. + +Perhaps there is a large enough appetite to run a "medium-sized portal archive node", +such that many users contribute ~100GB. +In this scenario, the DoS attacks are reduced as these medium-sized nodes would +cause less amplification of network traffic. + +### Appetite for lags + +If the desire for the results of an archive node is large enough, applications +and users could be tolerant of slow lookup times. For example, a wallet connected to a +portal archive node could display current wallet state quickly, but under a "history" tab could show: "performing deep search... Estimated time 24 hours". Once the information has been retrieved +it could then be stored for fast access. diff --git a/book/src/developers/core_concepts/bridge.md b/book/src/developers/core_concepts/bridge.md new file mode 100644 index 000000000..9d91de773 --- /dev/null +++ b/book/src/developers/core_concepts/bridge.md @@ -0,0 +1,20 @@ +# Bridge + +Blocks are produced by Ethereum Execution clients which use a different +network from Portal Network nodes. A Bridge node is responsible for taking data +from the external network and passing it to the Portal Network. + +```mermaid +flowchart LR + eth[Ethereum Execution node]-->bridge[Portal Network Bridge node] + bridge-->portal[Portal network node] +``` +This operates as follows: +```mermaid +sequenceDiagram + Bridge-->>Execution: eth_getBlock + Execution-->>Bridge: block + Bridge-->>Portal: block +``` +Currently the bridge functionality exists as a separate Python application, +with plans to implement it in Trin. \ No newline at end of file diff --git a/book/src/developers/core_concepts/chain_tip.md b/book/src/developers/core_concepts/chain_tip.md new file mode 100644 index 000000000..c774f2c86 --- /dev/null +++ b/book/src/developers/core_concepts/chain_tip.md @@ -0,0 +1,13 @@ +# Chain tip + +A Trin node can serve information about the chain tip, such as the latest +block number. A Trin node knows about the beacon chain protocol that is +creating the chain tip. + +By listening to activity on the beacon chain +network, it can follow the activities of members of the sync committee. If a certain fraction +of the sync committee has signed off on a given beacon block, the Trin node can +be confident that this is likely to be the chain tip. + +Beacon blocks contain references to Ethereum blocks, and so the node can see the tip of the +Execution chain. \ No newline at end of file diff --git a/book/src/developers/core_concepts/cryptographic_accumulator.md b/book/src/developers/core_concepts/cryptographic_accumulator.md new file mode 100644 index 000000000..edbad30d4 --- /dev/null +++ b/book/src/developers/core_concepts/cryptographic_accumulator.md @@ -0,0 +1,118 @@ +# Cryptographic accumulator + +A cryptographic accumulator is a structure that allows verification that a specific +block header in the past is part of the canonical chain. + +The History sub-protocol is responsible for accumulator-related data. + +An accumulator has been constructed for the Portal Network, because it is too burdensome to +keep all the headers on disk. This applies to pre-merge blocks.
For post-merge blocks, the Beacon Chain already maintains an accumulator that Trin can use via a Beacon Chain light client. + +## Canonicality + +A block can be valid but not canonical if it is an Uncle. Blocks A-F are canonical, with F +being the latest block. + +While Uncle_1 may have a valid block difficulty and parent, it was not built upon. + +```mermaid +flowchart RL + Uncle_3-.->D + F--->E + E--->D + Uncle_4-.->D + Uncle_1-.->C + D--->C + Uncle_2-.->C; + C--->B; + B--->A; +``` +If a Trin node is presented with such a block, it can check the accumulator, which only +processes non-uncle blocks A-F. + +## Tip knowledge + +First, the most recent block hash at the tip of the accumulator must be known. + +This is easy, as the accumulator only needs to cover pre-merge blocks. The +final pre-merge block (last Proof of Work block) hash is known and never needs to be updated. + +## Proofs + +A Merkle proof can be constructed for any given historical block hash. The proof asserts +that a given hash (from a peer) is part of the accumulator (valid based on knowledge of the +current chain tip). + +A proof cannot be constructed for any other sort of block (Uncle block, fabricated block). + +## Accumulator construction + +The accumulator is specifically a double-batched Merkle log accumulator. + +First, historical blocks are processed in batches called Epochs (unrelated to the concept +of a 32-slot epoch in the Beacon Chain). + +The accumulator constructor consists of two lists: +- One cache for holding blocks (header and difficulty). +- One final store (Master Accumulator) that the cache roots are added to. + +```mermaid +flowchart TD + Genesis-->new_epoch[Start new Epoch Accumulator] + new_epoch-->append + append[Append block header and difficulty to Epoch Accumulator] + append--> epoch_done{Done 8192 yet?} + epoch_done -.->|Yes| get_epoch_root + epoch_done -->|No| PoS{PoS reached?} + PoS --> |No| append + PoS -.-> |Yes| finished[Finished] + add_root[Append root to Master Accumulator] -.-> new_epoch + get_epoch_root[Compute hash tree root of epoch] -.-> add_root + finished -.-> save[Save incomplete final epoch and Master Accumulator] + +``` +Thus the final output is a list of roots called the Master Accumulator. + +## Constructing proofs + +If you have a block and you know the block number, then you know which epoch +root is relevant. You also know which part of the epoch it came from. That is, +you know the index of the leaf in the Merkle tree. + +With the root of the tree, the index and the data (the block hash in question), a proof +can be constructed showing that this leaf was part of the tree. + +## Proof use + +A proof can be sent to a peer alongside the data. That way, a peer can quickly check +that the data is canonical. + +## Accumulator distribution + +The Accumulator is built once and then distributed in Trin (and other clients). It does not +change over time and so can be incorporated into the `trin-core` (`./trin-core/src/assets`) and +included in binary releases. + +The History network contains individual epoch hashes from the Master Accumulator and +refers to them with the terms: `epoch_accumulator` and `epoch_accumulator_key` +(includes selector). See the History sub-protocol section of the Portal Network spec.
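As a rough illustration of the indexing described in "Constructing proofs" above, the relevant epoch root and leaf position follow directly from the block number (a minimal sketch, not a Trin API; 8192 is the number of header records per epoch):

```rust
/// Header records per epoch in the double-batched accumulator.
const EPOCH_SIZE: u64 = 8192;

/// For a pre-merge block number, return (epoch index, leaf index within that epoch).
fn accumulator_indices(block_number: u64) -> (u64, u64) {
    (block_number / EPOCH_SIZE, block_number % EPOCH_SIZE)
}

fn main() {
    // Block 8191 is the last header covered by the first epoch root.
    assert_eq!(accumulator_indices(8191), (0, 8191));
    // Block 8192 is the first header of the second epoch.
    assert_eq!(accumulator_indices(8192), (1, 0));
}
```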
+ +## Master accumulator details +The Master Accumulator consists of: +- 1895 complete epoch roots +- 1 incomplete epoch root (a partial epoch witht 5362 records (block headers)) + +```csv +epoch,index +8191,0x5ec1ffb8c3b146f42606c74ced973dc16ec5a107c0345858c343fc94780b4218 // first epoch +16383,0xa5364e9a9bc513c4601f0d62e6b46dbdedf3200bbfae54d6350f46f2c7a01938 +... +15523839,0x804008940c025a4e8a00ea42a659b484ba32c14dff133e9d3b7bf3685c1e54de // penultimate epoch (full) +15532031,0x3f81607c8cb3f0448a11cab8df0e504b605581f4891a9a35bd9c0dd37a71834f // final epoch (incomplete) +``` +Final PoW block: `15537394` + +The hash tree root of the Master Accumulator is: +```sh +0x8eac399e24480dce3cfe06f4bdecba51c6e5d0c46200e3e8611a0b44a3a69ff9 +``` diff --git a/book/src/developers/core_concepts/finding_peers.md b/book/src/developers/core_concepts/finding_peers.md new file mode 100644 index 000000000..77dba384b --- /dev/null +++ b/book/src/developers/core_concepts/finding_peers.md @@ -0,0 +1,110 @@ +# Finding peers + +If a peer is in a network behind a NAT (Network Address Translation) table, the process for +finding a peer is more complicated. + +These diagrams are indended as a rough-guide. + +## Non-NAT simple case + +The bootnode can gossip to Charlie who can then directly contact Alice. + +```mermaid +sequenceDiagram + Alice IP1 PORT1-->>Bootnode: Hello (ENR with no IP) + Bootnode-->>Alice IP1 PORT1: Hi, I notice your address is : + Alice IP1 PORT1-->>Alice IP1 PORT1: Updates ENR (:) + Bootnode-->>Charlie: Meet Alice (ENR with :) + Charlie->>Alice IP1 PORT1: Hello Alice at : + Alice IP1 PORT1->>Charlie: Hello (ENR :) +``` + +## NAT problem + +The bootnode can gossip to Charlie, but Charlie is a stranger from the NAT's perspective. +It doesn't know who on the internal network is the recipient. + +- The NAT remembers who it has spoken to. +- Messages from the bootnode are expected. +- Messages from Charlie are not expected, and its not clear who they are for. Perhaps +the smart fridge? + +```mermaid +sequenceDiagram + Alice IP1 PORT1-->>NAT IP2 PORT2: Hello bootnode (ENR with no IP) + Note right of NAT IP2 PORT2: Stores Bootnode + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps from internal IP + NAT IP2 PORT2-->>Bootnode: Hello bootnode (ENR with no IP) + Bootnode-->>NAT IP2 PORT2: Hi, I notice your address is : + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps to internal IP + NAT IP2 PORT2-->>Alice IP1 PORT1: Hi, I notice your address is : + Alice IP1 PORT1-->>Alice IP1 PORT1: Updates ENR (:) + Alice IP1 PORT1-->>NAT IP2 PORT2: Thanks bootnode (ENR with :) + NAT IP2 PORT2-->>Bootnode: Thanks boodnode (ENR with :) + Bootnode-->>Charlie: Meet Alice (ENR with :) + Charlie->>NAT IP2 PORT2: Hello Alice at : + Note right of NAT IP2 PORT2: No map on record. Who is this for? + Note right of Charlie: Hmm Alice didn't respond. +``` + +## The NAT solution + +If Alice knows she is behind a NAT, she can pass a message which goes: + +"I'm behind a NAT. Send your requests via peers and I'll reach out to you." + +- The bootnode gossips to Charlie +- Charlie sees "NAT" in Alices ENR +- Charlie asks the bootnode to introduce him to Alice +- Alice reaches out to Charlie +- The NAT now has a mapping for Charlie-Alice messages. + +### Part 1: NAT detection + +Alice can suspect that she is behind a NAT probabalitically. +If 2 minutes after connecting with a bootnode, no strangers (like Charlie) +have reached out, a NAT is likely. 
+ +```mermaid +sequenceDiagram + Alice IP1 PORT1-->>NAT IP2 PORT2: Hello bootnode (ENR with no IP) + Note right of NAT IP2 PORT2: Stores Bootnode + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps from internal IP + NAT IP2 PORT2-->>Bootnode: Hello bootnode (ENR with no IP) + Bootnode-->>NAT IP2 PORT2: Hi, I notice your address is : + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps to internal IP + NAT IP2 PORT2-->>Alice IP1 PORT1: Hi, I notice your address is : + Alice IP1 PORT1-->>Alice IP1 PORT1: Updates ENR (:) + Alice IP1 PORT1-->>NAT IP2 PORT2: Thanks bootnode (ENR with :) + NAT IP2 PORT2-->>Bootnode: Thanks boodnode (ENR with :) + Note right of Alice IP1 PORT1: ... Hmm no strangers. Must be a NAT. + +``` + +### Part 2: NAT communication + +Alice can put "NAT" in her ENR. Now when Charlie tries to get in touch, +he knows to go via a peer. + +Continued from above, skipping Charlie's failed attempt to contact Alice directly. + +```mermaid +sequenceDiagram + Note right of Alice IP1 PORT1: ... Hmm no strangers. Must be a NAT. + Alice IP1 PORT1-->>NAT IP2 PORT2: Update: NAT (ENR with NAT :) + NAT IP2 PORT2-->>Bootnode: Update: NAT (ENR with NAT :) + Bootnode-->>Charlie: Meet Alice (ENR with NAT :) + Charlie->>Bootnode: Hello Alice (From Charlie ENR()) + Note right of Bootnode: To Alice via Bootnode + Bootnode->>NAT IP2 PORT2: Hello Alice (From Charlie ENR()) + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps to internal IP + NAT IP2 PORT2-->>Alice IP1 PORT1: Hello Alice (From Charlie ENR()) + Alice IP1 PORT1-->>NAT IP2 PORT2: Hello Charlie (ENR with NAT :) + Note right of NAT IP2 PORT2: Stores Charlie + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps from internal IP + NAT IP2 PORT2-->>Charlie: Hello Charlie (ENR with NAT :) + Charlie-->>NAT IP2 PORT2: Hi Alice + NAT IP2 PORT2-->>NAT IP2 PORT2: Maps to internal IP + Note right of NAT IP2 PORT2: Finally has a mapping for Charlie! + NAT IP2 PORT2-->>Alice IP1 PORT1: Hello Alice +``` \ No newline at end of file diff --git a/book/src/developers/developer_stories.md b/book/src/developers/developer_stories.md new file mode 100644 index 000000000..c98d2372d --- /dev/null +++ b/book/src/developers/developer_stories.md @@ -0,0 +1,43 @@ +# Developer stories + +Trin is under active development. Perhaps you would like to get involved? + +The following are some situations that might resonate. + +## Issue resolver + +Someone who tried out Trin and found an issue, then worked out +where it was coming from. + +Consider making a pull request to fix the issue. + +## Ecosystem contributor + +Trin, and the Portal Network more broadly are perhaps more +quiet than other areas of Ethereum development. Maybe you can see +yourself helping out somewhere where you can have a meaningful impact. + +## Researcher + +Someone looking into the Ethereum protocol upgrade path, and thinking through +the impact of potential upcoming changes. + +There are interesting facets to the Portal network still to be determined. + +Perhaps you can be tempted by: +- Double batched merkle log accumulators +- Topology of content in distributed hash tables +- Adversarial scenario planning and mitigation + +## Hobbyist + +Someone looking to poke around and run a node on a single board computer or +a mobile device. How small is too small? 
+ +## Rust developer + +Someone looking to build something meaningful in Rust, with interesting +architecture and crates: +- Cryptography +- Peer to peer networking +- Async runtimes diff --git a/book/src/developers/goals.md b/book/src/developers/goals.md new file mode 100644 index 000000000..4b8a11de3 --- /dev/null +++ b/book/src/developers/goals.md @@ -0,0 +1,24 @@ +# Goals + +## Demonstrate feasibility + +Implement the Portal Network and demonstrate its use. Starting +with subset of the whole and then expanding from there. + +## Prioritise Sub-protocols + +### Primary +Get the History sub-protocol working. +- Iron out bugs +- Interop with other clients +- Monitor network to see if it retains data + +### Secondary + +Start work on the State sub-protocol. +- Implementation from the Portal Network specification. + +### Tertiary +Start work on remaining sub-protocols +- Canonical indices +- Transaction gossip diff --git a/book/src/developers/progress_status.md b/book/src/developers/progress_status.md new file mode 100644 index 000000000..64b54f1ef --- /dev/null +++ b/book/src/developers/progress_status.md @@ -0,0 +1,15 @@ +# Progress status + +The Portal Network and Trin are under active development so different components +may be in varying stages of progress. + +- Completed, looking for bugs +- Mid-construction +- Future, planned features + +Some methods to get a sense of the state of development are: +- Run `trin -- --help` and see what flags are available. +- Look at recent closed PRs to see what has just been merged. +- Look at recent issues that have been opened to see what active foci are. +- Look at what examples are shown in the setup and usage guides. +- Run trin in the way you think it might work and see what happens. diff --git a/book/src/developers/protocols/README.md b/book/src/developers/protocols/README.md new file mode 100644 index 000000000..2c4c76c8e --- /dev/null +++ b/book/src/developers/protocols/README.md @@ -0,0 +1,7 @@ +# Protocols + +This section contains summaries of important protocols for the Portal +Network. The purpose is to distill important concepts to quickly +see how Trin works. + +See the relevant specifications for deeper explanations. diff --git a/book/src/developers/protocols/discovery.md b/book/src/developers/protocols/discovery.md new file mode 100644 index 000000000..728e00ca9 --- /dev/null +++ b/book/src/developers/protocols/discovery.md @@ -0,0 +1,34 @@ +# Discovery + +## Node Discovery Protocol v5 (Discv5) + +A protocol ([spec](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) for nodes to identify each other. There are three main capabilities: +- Sample (walk the network to find nodes) +- Search (locate nodes interested in a specific topic) +- Update (navigate when a peer updates their details, such as IP address) + +Discovery is a high level protocol that is further defined with the Discovery wire protocol. + +## Discovery (Discv5) wire protocol + +An application-level protocol ([spec](https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire.md)) for nodes using Discv5. It describes the structure and logic of different +messages sent between nodes. + +Some important properties of the protocol are: +- UDP based scheme (tolerant to packet loss compared with TCP) + - The Portal Network uses a variant of UDP called uTP that is more friendly for larger + packets. 
+- Message encryption (ENRs are used to encrypt data with the recipients public key) +- Protocol differentiation (allow nodes to avoid unrelated networks) +- Message types (for finding peers, establishing connections, requesting specific things) +- Flexible request/types types (for sub-protocols to use custom messages) + +## Ethereum Node Records (ENR) + +A data format ([spec](https://github.com/ethereum/devp2p/blob/master/enr.md)) that allows nodes to know the identity and important information about +peers. This includes data like IP address and ports. + +Nodes generate a private key for the purpose of node discovery. This is used to sign +the ENR to prevent impersonation. Peers can encrypt messages for each other using the ENR. + +The private key is unrelated to private keys used to sign Ethereum transactions. diff --git a/docs/jsonrpc_api.md b/book/src/developers/protocols/json_rpc.md similarity index 57% rename from docs/jsonrpc_api.md rename to book/src/developers/protocols/json_rpc.md index 512978907..3b791401a 100644 --- a/docs/jsonrpc_api.md +++ b/book/src/developers/protocols/json_rpc.md @@ -1,7 +1,8 @@ -# JSON-RPC API +# JSON-RPC This is a document for all JSON-RPC API endpoints currently supported by Trin. Trin plans to eventually support the entire [Portal Network JSON-RPC API](https://playground.open-rpc.org/?schemaUrl=https://raw.githubusercontent.com/ethereum/portal-network-specs/assembled-spec/jsonrpc/openrpc.json&uiSchema%5BappBar%5D%5Bui:splitView%5D=false&uiSchema%5BappBar%5D%5Bui:input%5D=false&uiSchema%5BappBar%5D%5Bui:examplesDropdown%5D=false) and [Ethereum JSON-RPC API](https://eth.wiki/json-rpc/API#json-rpc-methods). + ## Currently supported endpoints ### Portal Network @@ -10,14 +11,19 @@ The specification for these endpoints can be found [here](https://playground.ope - `discv5_nodeInfo` - `discv5_routingTableInfo` - `portal_historyFindContent` -- `portal_stateFindContent` - `portal_historyFindNodes` -- `portal_stateFindNodes` +- `portal_historyGossip` +- `portal_historyLocalContent` +- `portal_historyPing` - `portal_historyOffer` -- `portal_stateOffer` +- `portal_historyRecursiveFindContent` - `portal_historyStore` +- `portal_stateFindContent` +- `portal_stateFindNodes` +- `portal_stateLocalContent` +- `portal_stateGossip` +- `portal_stateOffer` - `portal_stateStore` -- `portal_historyPing` - `portal_statePing` ### Ethereum endpoints @@ -32,13 +38,12 @@ The specification for these endpoints can be found [here](https://eth.wiki/json- - [`web3_clientVersion`](https://eth.wiki/json-rpc/API#web3_clientversion) ### Custom Trin JSON-RPC endpoints -- [`portal_historyRadius`](#portal_historyRadius) -- [`portal_stateRadius`](#portal_stateRadius) -- [`portal_historyLocalContent`](#portal_historyLocalContent) -- [`portal_stateLocalContent`](#portal_stateLocalContent) -- [`portal_historyRecursiveFindContent`](#portal_historyRecursiveFindContent) -- [`portal_historyTraceRecursiveFindContent`](#portal_historyTraceRecursiveFindContent) -- [`portal_paginateLocalContentKeys`](#portal_paginateLocalContentKeys) +The following endpoints are not part of the Portal Network specification and are defined +in subsequent sections: +- [`portal_historyRadius`](#portal_historyradius) +- [`portal_historyTraceRecursiveFindContent`](#portal_historytracerecursivefindcontent) +- [`portal_paginateLocalContentKeys`](#portal_paginatelocalcontentkeys) +- [`portal_stateRadius`](#portal_stateradius) # History Overlay Network @@ -60,46 +65,6 @@ Returns the current data storage radius being used for 
the History network. } ``` -## `portal_historyLocalContent` -Attempts to look up content key in Trin node's local db. - -### Parameters -- `content_key`: Target content key. - -### Returns -- Hex-encoded content value. - -#### Example -```json -{ - "id": 1, - "jsonrpc": "2.0", - "result": { - "content": "0xf90217a06add1c183f1194eb132ca8079197c7f2bc43f644f96bf5ab00a93aa4be499360a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942a65aca4d5fc5b5c859090a6c34d164135398226a05ae233f6377f0671c612ec2a8bd15c20e428094f2fafc79bead9c55a989294dda064183d9f805f4aecbf532de75e6ad276dc281ba90947ff706beeaecc14eec6f5a059cf53b2f956a914b8360ea6fe271ebe7b10461c736eb16eb1a4121ba3abbb85b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000860710895564a08309a92a832fefd882520884565fc3be98d783010302844765746887676f312e352e31856c696e7578a0c5e99c6e90fbdee5650ff9b6dd41198655872ba32f810de58acb193a954e15898840f1ce50d18d7fdc" - } -} -``` - -## `portal_historyRecursiveFindContent` -Traverses the network by recursively sending `FINDCONTENT` messages in order to retrieve the target content. - -### Parameters -- `content_key`: Target content key. - -### Returns -- Target content value, or `0x` if the content was not found. - -#### Example -```json -{ - "id": 1, - "jsonrpc": "2.0", - "result": { - "content": "0xf90217a06add1c183f1194eb132ca8079197c7f2bc43f644f96bf5ab00a93aa4be499360a01dcc4de8dec75d7aab85b567b6ccd41ad312451b948a7413f0a142fd40d49347942a65aca4d5fc5b5c859090a6c34d164135398226a05ae233f6377f0671c612ec2a8bd15c20e428094f2fafc79bead9c55a989294dda064183d9f805f4aecbf532de75e6ad276dc281ba90947ff706beeaecc14eec6f5a059cf53b2f956a914b8360ea6fe271ebe7b10461c736eb16eb1a4121ba3abbb85b9010000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000860710895564a08309a92a832fefd882520884565fc3be98d783010302844765746887676f312e352e31856c696e7578a0c5e99c6e90fbdee5650ff9b6dd41198655872ba32f810de58acb193a954e15898840f1ce50d18d7fdc" - } -} -``` - ## `portal_historyTraceRecursiveFindContent` Same as `portal_historyRecursiveFindContent`, but will also return a "route" with the content. The "route" contains all of the ENR's contacted during the lookup, and their respective distance to the target content. If the content is available in local storage, the route will contain an empty array. @@ -145,25 +110,6 @@ Returns the current data storage radius being used for the State network. } ``` -## `portal_stateLocalContent` -Attempts to look up content key in Trin node's local db. - -### Parameters -- `content_key`: Target content key. - -### Returns -- Hex-encoded content value. 
- -#### Example -```json -{ - "id": 1, - "jsonrpc": "2.0", - "result": { - "content": "0x0217a06ebc43f644f96bf5ab00a93aa4be499360a01dcc4de8dec75d0a142fd40d49347942a65aca4d5fc5b5c859090a6c34d164135398226a05ae233f6377f0671c612ec2a8bd15c20e428094f2fafc79bead9c55a989294dda064183d9f805f4aecbf532de75e6ad276dc281ba90947ff706beeaecc14eec6f5a059cf53b2f956a914b8360ea6fe271ebe7b10461c736eb16eb1a4121ba3abbb85b90110895564a08309a92a832fefd882520884565fc3be98d783010302844765746887676f312e352e31856c696e7578a0c5e99c6e90fbdee5650ff9b6dd41198655872ba32f810de58acb193a954e15898840f1ce50d18d7fdc" - } -} -``` # General diff --git a/book/src/developers/protocols/kademlia.md b/book/src/developers/protocols/kademlia.md new file mode 100644 index 000000000..c711b0af4 --- /dev/null +++ b/book/src/developers/protocols/kademlia.md @@ -0,0 +1,50 @@ +# Kademlia + +A protocol for finding content that is distributed amongst peers. + +## Overview + +### You know who should have what +Each node is responsible for having some content. Exactly what +content they have is determined by the ID of the node. Nodes have +peers, and know the IDs of their peers. Hence, nodes know what data +their peers *should* have. + +### You know if you're close + +Each node has a way of determining how close data is to an ID. + +For example, you might say "I only have data that starts with five 0s". +If you see data with four zeros, you recognise that it is close. Closer +than no zeros. +```ignore +# ID +00000... +# Close data +00001... +# Far away data +11111... +``` + +### Nodes prefer similar nodes + +Nodes prefer peers who have data that is close to theirs. Hence +if you are looking for a piece of data, you can look through your +peers, find the closest one and ask them. That peer will have +contacts that are similar, and so you can ask them to check with their peers. + +Hence, network requests can "head in the right direction". + +### Visualization + +Animations of the protocol can be seen [here](https://kelseyc18.github.io/kademlia_vis/basics/1/) + +## Use + +### Full database + +When Trin is full and a new piece of data is to be stored, the content is +stored, and then other data is deleted until the storage is at the targed size. + +This involves repeatedly removing the content with the furthest ID from the node ID until +the database is below the target. \ No newline at end of file diff --git a/book/src/developers/protocols/portal_wire.md b/book/src/developers/protocols/portal_wire.md new file mode 100644 index 000000000..864554db8 --- /dev/null +++ b/book/src/developers/protocols/portal_wire.md @@ -0,0 +1,69 @@ +# Portal wire protocol + +The Portal Wire protocol ([spec](https://github.com/ethereum/portal-network-specs/blob/master/portal-wire-protocol.md)) is a variant of the discovery (Discv5) wire protocol ([spec](https://github.com/ethereum/devp2p/blob/master/discv5/discv5-wire.md)). + +This means that the basic protocol is the same, but there are custom portal-specific messages. + + +## Protocol ID +Nodes identify each other by a protocol ID. + +- `0x50..` Portal Network + - `0x500A` portal state sub-protocol. + - `0x500B` portal history sub-protocol. + - Etc. +- `0x....` Other networks (Ethereum execution, Ethreum consensus chain, others.) + +## Messages +Messages the define the portal sub-protocols are: +- `TALKREQ` (talk requests). These have a request ID. +- `TALKRESP` (talk responses). These refer to the reqest ID being responded to. + +Messages contain data that is an SSZ union. 
This means that the message contains +one of the possible message content types, and that the type will be specified. +```py +# Python +message = Union[ping, pong, find_nodes, nodes, find_content, content, offer, accept] +``` +## Message encoding + +The SSZ Union encoding means that each component has a selector (`PING 0x00, PONG 0x01, FIND_NODES 0x02, ...`). +That way, different clients on the network can listen to messages on the right protocol +and correctly decode them. + +For example, receiving a message and seeing that the first byte is `0x02` indicates that the +message contains a `FIND_NODES` type of content. + +## Message data + +Each message has specific data that is sent. For example, a `find_content` message component will have the content key that is being sought. The details of these can be found in the spec. + +## Additional API exposure + +The above message definitions are sufficient for a Trin node to participate in the network. + +However, as Trin is a JSON-RPC server (serving Ethereum-related requests like `eth_blockNumber`) +it also exposes the wire methods. This is not strictly required by the Portal Network specification, +but is very useful. + +Messages that are wire responses are not exposed, as they are not requests. + +## Relationship to JSON-RPC + +The following table shows the wire definition, purpose and how it is exposed for querying via +the JSON-RPC server. + +Recall that each Trin node can serve multiple sub-protocols simultaneously. Hence, the +following table is for the History sub-protocol (`0x500B` in Discovery terms), and +JSON-RPC methods start with `portal_history*`. + +|message|SSZ union message selector|purpose|JSON-RPC| +|-|-|-|-| +|ping|`0x00`|"Are you alive?"|`portal_historyPing`| +|pong|`0x01`|"I'm alive"|None| +|find_nodes|`0x02`|"Give me peers at specific distances x, y & z"|`portal_historyFindNodes`| +|nodes|`0x03`|"Response to find_nodes"|None| +|find_content|`0x04`|"I want content x, or peers who might have it"|`portal_historyFindContent`| +|content|`0x05`|"Here is content x, or peers who might have it."|None| +|offer|`0x06`|"I have content x, y & z, would you like any of them?"|`portal_historyOffer`| +|accept|`0x07`|"Yes please, I would like x, y & z"|None| diff --git a/book/src/developers/protocols/ssz.md b/book/src/developers/protocols/ssz.md new file mode 100644 index 000000000..75b3ddc3d --- /dev/null +++ b/book/src/developers/protocols/ssz.md @@ -0,0 +1,66 @@ +# SSZ + +The Simple Serialize (SSZ) protocol ([spec](https://github.com/ethereum/consensus-specs/blob/dev/ssz/simple-serialize.md)) is used to ensure that data sent to peers is interpreted unambiguously. + +It is used in two main ways: +- Encoding: from rich data (struct, enum) to bytes (`Vec<u8>`). +- Decoding: from bytes (`Vec<u8>`) to rich data (struct, enum). + +The encoded data is not self-describing, so you have to know what sort of data you +are expecting. Hence, the data type descriptions in the Portal Network spec. + +Encoded data can only be interpreted in one way. Additionally, encoded data +can also be used in Merkle proofs efficiently. + +## Types + +The following is a quick overview of major composite types used. See the spec for +basic types (e.g., bits, bools, unsigned integers).
+ +|Type|Description|Note| +|-|-|-| +|List|Holds variable number of a specified item|Specify max number| +|Vector|Holds specific number of a specified item|Specify number| +|Container|Holds many different specified items| Items are spaced into 32-byte partitions| +|Union|Holds one of many different specified items| Item kind is specified using a prepended selector byte| + +Each type can hold any of the other types, so a Container can hold a List +and a Union, and the Union can hold another Container, etc. +Anything that is put into one of the types above must itself be SSZ-able. + +## Implementations + +The `ssz` crate does a lot of the work by providing `Encode` and `Decode` methods that +can be derived. +```rs,no_run +#[derive(Clone, Debug, Decode, Encode, PartialEq)] +#[ssz(enum_behaviour = "union")] +pub enum HistoryContentKey { + BlockHeaderWithProof(BlockHeader), + BlockBody(BlockBody), + BlockReceipts(BlockReceipts), + EpochAccumulator(EpochAccumulator), +} +``` +The spec defines a content key for the History sub-protocol as: +```py +block_header_key = Container(block_hash: Bytes32) +selector = 0x00 +content_key = selector + SSZ.serialize(block_header_key) +``` +The inclusion of the selector is handled by implementing `serde::Serialize` for +the enum, and appending the appropriate selector byte. + +That is, the hex string ready to be serialized into bytes for a block header would be: +```sh +# Header, body or receipts +"0x" +# Header specifically +"0x00" +# Serialize to bytes +[0x00, ...] +``` +## Merkle proofs + +The Epoch Accumulator uses SSZ encoding. This allows for Merkle proofs to be made +for arbitrary historical blocks against the accumulator. \ No newline at end of file diff --git a/book/src/developers/protocols/utp.md b/book/src/developers/protocols/utp.md new file mode 100644 index 000000000..a0cd5c402 --- /dev/null +++ b/book/src/developers/protocols/utp.md @@ -0,0 +1,25 @@ +# uTP + +The Discv5 protocol normally uses UDP packets, however these have limitations +on packet size. The Portal Network uses Micro Transport Protocol (uTP) +([spec](https://github.com/ethereum/portal-network-specs/blob/master/discv5-utp.md)) +to avoid this problem. + +uTP is similar to the BitTorrent protocol and provides a way to send ordered packets. + +Once two peers have agreed to send data via messages in the Portal Wire protocol (e.g., via +an `OFFER` and `ACCEPT` sequence), the peers can then open communication on the uTP +sub-protocol. Inside this protocol they can send the data to each other, following the +uTP protocol until the data transfer is complete. + +First, a sub-protocol is used: +- History sub-protocol (arrange data transfer, but don't start sending) +- State sub-protocol (arrange data transfer, but don't start sending) +- ... + +Second, once data transfer is arranged, peers switch to the uTP protocol to start the +data transfer. uTP uses the relevant sub-protocol (e.g., History Discv5 overlay, +State Discv5 overlay) as transport. + +By providing an ID for the content that they are transferring, the two peers +can easily switch from one protocol to the uTP protocol and complete the specified transfer.
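
That handover can be sketched as follows. This is a rough illustration only: the peer names are made up, and the exact packet exchange is defined in the Portal wire and uTP specifications.

```mermaid
sequenceDiagram
    Alice-->>Bob: OFFER content x (History sub-protocol)
    Bob-->>Alice: ACCEPT (includes an ID for the transfer)
    Note over Alice,Bob: Switch to uTP, using the same overlay as transport
    Alice->>Bob: uTP data packets for content x (ordered)
    Bob->>Alice: uTP acknowledgements
    Note over Alice,Bob: Transfer complete
```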
diff --git a/docs/getting_started.md b/book/src/developers/quick_setup.md similarity index 52% rename from docs/getting_started.md rename to book/src/developers/quick_setup.md index 1d26cbe6e..4236def81 100644 --- a/docs/getting_started.md +++ b/book/src/developers/quick_setup.md @@ -1,8 +1,10 @@ -# Getting Started +# Quick setup + +This is a single page the aims to cover everything required to get Trin running. **Trin is currently in unstable alpha, and should not be used in production. If you run into any bugs while using Trin, please file an Issue!** -**Check out the [Release Notes](/docs/release_notes.md) to see the latest supported features.** +**Check out the `./newsfragments` directory to see the latest changes.** ## Prerequisites - Execution node, either: @@ -54,7 +56,7 @@ cargo run Note: You may also pass environment variable values in the same command as the run command. This is especially useful for setting log levels. ```sh -RUST_LOG=debug cargo run +RUST_LOG=debug cargo run ``` View CLI options: @@ -70,7 +72,7 @@ Run with the `--trusted-provider` as a local execution node (normally runs on `1 Serve portal node web3 access over a different port (such as `8547`) using the `--web3-http-address` flag. The `--web3-transport` for a local node will be over `http` (rather than `ipc`). -``` +```sh RUST_LOG=debug cargo run -- \ --trusted-provider custom \ --trusted-provider-url http://127.0.0.1:8545 \ @@ -93,7 +95,7 @@ cargo run -- --bootnodes default To establish a connection with a specific peer, pass in one or more bootnode ENRs. Pass the ENR as the value for the `--bootnodes` CLI flag. ```sh -cargo run -- --bootnodes +cargo run -- --bootnodes ``` ## Default data directories @@ -164,95 +166,3 @@ nc -U /tmp/trin-jsonrpc.ipc ``` For something in between, you may use `curl` to send requests to the HTTP JSON-RPC endpoint. - -## Using Trin-CLI - -A more detailed description of `trin-cli` is available [here](../trin-cli/README.md). - -We can use Trin-CLI to initiate sending a message from one Trin client to another. - -### Trin-CLI Environment - -- Open a new terminal window and make sure you're in the same directory where Trin is installed. -- Make sure that you've set the required environment variables. - -### View routing table - -Each Trin client uses a routing table to maintain a record of members in the Portal network with whom it can communicate. At startup, your routing table should be empty (unless you've passed in the bootnode ENR's via the `--bootnodes` CLI param). - -View your routing table: - -```sh -cargo run -p trin-cli -- json-rpc discv5_routingTableInfo -``` - -View ENR information about your own Trin client: - -```sh -cargo run -p trin-cli -- json-rpc discv5_nodeInfo -``` - -### Connect to the Portal Network testnet - -You can send a message from the local node to a bootnode using JSON-RPC, automatically adding the bootnode to your routing table. - -Find a [testnet bootnode ENR](https://github.com/ethereum/portal-network-specs/blob/master/testnet.md). - -Send a `PING` to the node on any of the Portal sub-networks (currently, only history and state are supported in Trin). - -```sh -cargo run -p trin-cli -- json-rpc portal_historyPing --params -``` - -After pinging a bootnode, you should be able to see the messages being sent and received in your node's logs. Now you can check your routing table again, where you should see the pinged bootnode (along with other nodes the bootnode shared with you). Congrats! You're now connected to the Portal Network testnet. 
- -### Encode Content Keys - -Pieces of content (data) on the Portal Network have unique identifiers that we refer to as "content keys". To request a particular piece of content, you will need the corresponding content key. - -The encoding for the content key depends on the kind of content that the key refers to. - -See available content keys (e.g. block header): - -```sh -cargo run -p trin-cli -- encode-key -h -``` - -See arguments for a specific content key: - -```sh -cargo run -p trin-cli -- encode-key block-header -h -``` - -Example: - -```sh -$ cargo run -p trin-cli -- encode-key block-body --block-hash 59834fe81c78b1838745e4ac352e455ec23cb542658cbba91a4337759f5bf3fc -``` - -### Request Content - -Send a `FindContent` message to a Portal Network bootnode. - -```sh -cargo run -p trin-cli -- json-rpc portal_historyFindContent --params , -``` - -### Setting up local metrics reporting - -1. Install Docker. -2. Run Prometheus: `docker run -d -p 9090:9090 -v /absolute/path/to/trin/docs/metrics_config:/etc/prometheus prom/prometheus`. Set the correct absolute path to your copy of Trin's `docs/metrics_config/`. -3. Run Grafana: `docker run -d -p 3000:3000 -e "GF_INSTALL_PLUGINS=yesoreyeram-infinity-datasource" grafana/grafana:latest`. -4. Start your Trin process with `--enable-metrics-with-url 127.0.0.1:9100 --web3-transport http`. - - The `--enable-metrics-with-url` parameter is the address that Trin exports metrics to, and should be equal to the port to which your Prometheus server is targeting at the bottom of `metrics_config/prometheus.yml`. - - The `--web-transport http` will allow Grafana to request routing table information from Trin via JSON-RPC over HTTP. -5. From the root of the Trin repo, run `cargo run -p trin-cli -- create-dashboard`. If you used different ports than detailed in the above steps, or you are not using docker, then this command's defaults will not work. Run the command with the `-h` flag to see how to provide non-default addresses or credentials. -6. Upon successful dashboard creation, navigate to the dashboard URL that the `create-dashboard` outputs. Use `admin`/`admin` to login. - -## Gotchas - -- If `create-dashboard` fails with an error, the most likely reason is that it has already been run. From within the Grafana UI, delete the "json-rpc" and "prometheus" datasources and the "trin" dashboard and re-run the command. - -- There is a limit on concurrent connections given by the threadpool. At last - doc update, that number was 2, but will surely change. If you leave - connections open, then new connections will block. diff --git a/book/src/introduction/README.md b/book/src/introduction/README.md new file mode 100644 index 000000000..008c60e62 --- /dev/null +++ b/book/src/introduction/README.md @@ -0,0 +1,31 @@ +# Introduction + +> This book is about Trin, which is software used to interact with the Ethereum protocol +via the Portal Network. + +Trin is a Portal network client which acts as a json-rpc server with: +- Nearly instant sync +- Low CPU & storage usage + +The Ethereum protocol will allow full nodes to forget old data in an +likely future upgrade. Portal network nodes can supply users with that data. + +Trin makes it possible to access Ethereum with less computer resources +than a regular full node. It does this by spreading data amongst peers. 
+```mermaid +flowchart TB + subgraph Full node data: on one computer + full[Regular full node] + end + subgraph Full node data: spread amongst computers + p1[Portal node] + p2[Portal node] + p3[Portal node] + p1 <--> p2 + p2 <--> p3 + p1 <--> p3 + + end + +``` +🏗 The sections, content and links of this book are subject to change. \ No newline at end of file diff --git a/book/src/introduction/portal_network.md b/book/src/introduction/portal_network.md new file mode 100644 index 000000000..0eae7b31f --- /dev/null +++ b/book/src/introduction/portal_network.md @@ -0,0 +1,79 @@ +# Portal Network + +The portal network is a response to two needs. Users should have the ability to: +- Access Ethereum using peers (not providers) from 'small' computers. + - An old nice-to-have. +- Access historical data once "history expiry" upgrade goes live + - A likely future need. + +## What is "history expiry" + +[EIP-4444: Bound Historical Data in Execution Clients](https://eips.ethereum.org/EIPS/eip-4444) +is an upgrade that seeks to limit the costs of participating in the network. It does this +by allowing by clearing data older than 1 year. + +## How the Portal network works + +### Small users working together +```mermaid +graph TD; + A & B & C & D & E & F & G & H & I --> id5[complete network data]; +``` +The portal network consists of many small nodes that each contribute to the whole. +Each node is allocated a specific part of the network to obtain from peers and +serve back to the network. + +The portal network splits data in to different types (e.g., blocks vs new transactions). +Each distinct type is effectively a new network. A portal client such as Trin can be used to +operate on each/all of these different sub-protocols. + +### Dedicated sub-protocols +Users can elect to be part of some sub-networks: +```mermaid +graph TD; + A & B & C --> id1[(History)] + D & E & F --> id2[(State)] + A & G & I --> id4[(Indices)] + C & E & H --> id3[(Txs)] + id1[(History)] & id2[(State)] & id3[(Txs)] & id4[(Indices)] --> id5[complete network data]; +``` + +### Peer referrals based on names +Nodes make requests to each other for data. If they don't have the data, they look at their peers and +suggest one that is most likely to. + +```mermaid +graph LR; + id1[A requests data] -.-> D & E + D -.-> F & G + id1[A requests data] ---> B ---> id2[C has data] + E -.-> F + B & G -.-> I +``` + +### Standard requests +Node each have a name, and only hold data that is similar to that name. Peers can tell who +is likely to have what data based on these names. +```mermaid +sequenceDiagram + Alice-->>Bob: Looking for data 0xabc + Bob-->>Alice: Sorry, but try Charlie (gives address) + Alice-->>Charlie: Looking for data 0xabc + Charlie-->>Alice: I have it, do you want? + Alice-->>Charlie: Yes + Charlie->>Alice: Data 0xabc +``` + +### Tunable resources + +Nodes keep content that is similar to their name. That similarity radius can be made +larger to voluntarily hold more data. + +```mermaid +graph TD; + id1[(Alice with big hard drive)] + id2[(Bob)] + id4[(Charlie, medium)] +``` + +In addition to Trin, other portal clients are in development and participate in the same network. diff --git a/book/src/users/README.md b/book/src/users/README.md new file mode 100644 index 000000000..7adfeb1e6 --- /dev/null +++ b/book/src/users/README.md @@ -0,0 +1,56 @@ +# Users + +The following are users who are well-suited to using Trin. + +## Laptop wallet user + +A user has a laptop that frequently is turned off. 
When +they want to transact, they can turn on Trin and connect their +wallet to it. + +*Benefit*: Wallet use without reliance on third-party wallet APIs. + +## Desktop wallet user + +A user has a desktop that is usually on, but most of the disk is used for other things. +When they want to transact, their wallet is already connected to their portal node. + +*Benefit*: Wallet use without reliance on third-party wallet APIs. Contributes to +network health without using the entire disk. + +## Protocol experimentation + +A researcher looking to explore the Ethereum protocol, testing out +specific aspects and perhaps making experimental changes to the protocol. + +*Benefit*: Spin up a node and play around quickly and with low cost. + +## Single board computer hobbyist + +A Raspberry Pi 3, or similarly-sized computer, could contribute +to network health. + +Currently a Raspberry Pi 4 can run a full node, with consensus +and execution clients, however this is a tight fit and requires a ~2TB SSD. + +*Benefit*: Learn about Ethereum, get node access and provide the +network with additional robustness. + +## Mobile user + +Trin is not currently configured to run on mobile, however this is plausibly +a viable and interesting use case. A Trin node could run as a background +task with configurable limits on disk, CPU and bandwidth use. + +*Benefit*: Wallet use without reliance on third-party wallet APIs. Contributes to +network health. + +## Unsuitable users + +There are situations where Trin is expected not to be a good node choice: +- Time-critical chain tip data. It is likely that data distribution will not be fast enough for these + use cases, however testing may show otherwise. + - Consensus participation. Beacon chain staking with a Consensus client using a Portal Network node as the Execution client. + - Block builder. Serving blocks to beacon chain validator nodes via MEV-boost. +- Data analysis requiring state at historical blocks. Trin is not an archive node and does not + expose `trace_*` or `debug_*` endpoints. diff --git a/book/src/users/faq.md b/book/src/users/faq.md new file mode 100644 index 000000000..6042994e8 --- /dev/null +++ b/book/src/users/faq.md @@ -0,0 +1,23 @@ +# FAQ + +The following are frequently asked questions or topics that may be of interest +to users. + +New submissions are welcome: if you had a question and found the answer elsewhere, +submit a pull request or an issue describing the question and the answer. +These questions will appear in searches in the book and in the Trin repository. + +## Can I rely on Trin to interact with Ethereum? + +Not at present. Trin and the Portal Network more broadly are under active +development. + +## Can Trin be used with a VPN? + +Trin should be compatible with VPN use, but if you experience difficulty connecting to the +network we recommend disabling your VPN. + +## Can Trin be used over Tor? + +No. Trin uses uTP, which is not supported over Tor. + diff --git a/book/src/users/installation.md b/book/src/users/installation.md new file mode 100644 index 000000000..f0c8972b1 --- /dev/null +++ b/book/src/users/installation.md @@ -0,0 +1,4 @@ +# Installation + +The following are guides for installation on different platforms. +Other methods may be used as appropriate.
\ No newline at end of file diff --git a/docs/ubuntu_guide.md b/book/src/users/installation/linux.md similarity index 97% rename from docs/ubuntu_guide.md rename to book/src/users/installation/linux.md index 0de8bfd12..2fccbb3f4 100644 --- a/docs/ubuntu_guide.md +++ b/book/src/users/installation/linux.md @@ -1,15 +1,17 @@ -# Trin on Ubuntu: Setup guide +# Linux + +## Trin on Ubuntu These steps are for setting up a Trin node as a service on Ubuntu. ## Installation -``` +```sh $ sudo apt install libssl-dev librocksdb-dev libclang-dev pkg-config build-essentials ``` Install Trin: > Tip: If you intend to submit code changes to trin, first fork the repo and then clone that url. -``` +```sh $ cd ~ $ git clone https://github.com/ethereum/trin.git $ cd trin @@ -17,32 +19,32 @@ $ cargo build --workspace --release ``` Now the executable is located in `trin/target/release` and can be called by systemd. Move that binary to the standard location for binaries: -``` +```sh $ sudo cp -a ~/trin/target/release/trin /usr/local/bin/trin ``` > Tip: If you make changes to these steps, keep a record for future reference. Make a new user for the Trin service: -``` +```sh $ sudo useradd --no-create-home --shell /bin/false trin ``` Make a directory for Trin data and give the Trin user permission to access it: -``` +```sh $ sudo mkdir -p /var/lib/trin $ sudo chown -R trin:trin /var/lib/trin ``` Check that the binary works: -``` +```sh $ /usr/local/bin/trin --version ``` Example response: -``` +```sh > Launching trin > trin 0.0.1 ``` ## Configuration Before setting up the service, look at the flags that can be set when starting Trin: -``` +```sh $ /usr/local/bin/trin --help ``` Some selected flags are described below. @@ -74,14 +76,14 @@ the transport must also be changed to http from the default (ipc). To pick a new port, select a number in the range 1024–49151 and test if it is in use (no response indicates it is ok to use): -``` +```sh $ sudo ss -tulpn | grep ':9009' ``` ## Create the node service Create a service to run the Trin node: -``` +```sh $ sudo nano /etc/systemd/system/trin.service ``` Paste the following, modifying flags as appropriate: @@ -119,7 +121,7 @@ the `trin.service` file: $ sudo systemctl edit trin ``` Open the file: -``` +```sh $ sudo nano /etc/systemd/system/trin.service.d/override.conf ``` Paste the following, replace the Infura ID with your own. @@ -136,11 +138,11 @@ Environment="TRIN_DATA_PATH=/var/lib/trin" ## Configure firewall Ensure that the discovery port (custom or default 9000) is not blocked by the firewall: -``` +```sh $ sudo ufw allow 9009 ``` Check the configuration: -``` +```sh $ sudo ufw status numbered ``` > Tip: use `sudo ufw delete ` to remove a particular rule. @@ -148,24 +150,24 @@ $ sudo ufw status numbered ## Start the service Start the Trin node service and enable it to start on reboot: -``` +```sh $ sudo systemctl daemon-reload $ sudo systemctl start trin $ sudo systemctl status trin $ sudo systemctl enable trin ``` Follow Trin's logs: -``` +```sh $ sudo journalctl -fu trin ``` CTRL-C to to exit. Logs can be searched for an "exact phrase": -``` +```sh $ grep "trin" /var/log/syslog | grep "exact phrase" ``` To stop Trin and disable it from starting on reboot: -``` +```sh $ sudo systemctl stop trin $ sudo systemctl disable trin ``` @@ -176,7 +178,7 @@ $ sudo systemctl disable trin See [getting started](getting_started.md) notes for more tips including setting environment variables during testing. 
-``` +```sh $ cargo test --workspace $ cargo run -- --discovery-port 9009 \ --web3-http-address 127.0.0.1:8547 \ @@ -189,7 +191,7 @@ $ cargo run -- --discovery-port 9009 \ To get upstream updates, sync your fork with upstream on Github. To move any changes from the codebase to the service, rebuild and move the binary as before: -``` +```sh $ git pull $ cd trin $ cargo build --workspace --release @@ -197,7 +199,7 @@ $ sudo systemctl stop trin $ sudo cp -a ~/trin/target/release/trin /usr/local/bin/trin ``` Restart the service to use the new binary: -``` +```sh $ sudo systemctl daemon-reload $ sudo systemctl start trin ``` diff --git a/book/src/users/installation/mac_os.md b/book/src/users/installation/mac_os.md new file mode 100644 index 000000000..5e4155826 --- /dev/null +++ b/book/src/users/installation/mac_os.md @@ -0,0 +1,9 @@ +# Mac Os + +Clone trin and run. +```sh +$ cd ~ +$ git clone https://github.com/ethereum/trin.git +$ cd trin +$ cargo run -p trin --release +``` diff --git a/book/src/users/installation/raspberry_pi.md b/book/src/users/installation/raspberry_pi.md new file mode 100644 index 000000000..27e9a457f --- /dev/null +++ b/book/src/users/installation/raspberry_pi.md @@ -0,0 +1,3 @@ +# Raspberry Pi + +Not yet attempted, but experiments are encouraged. \ No newline at end of file diff --git a/book/src/users/installation/windows.md b/book/src/users/installation/windows.md new file mode 100644 index 000000000..a0f887a6f --- /dev/null +++ b/book/src/users/installation/windows.md @@ -0,0 +1,3 @@ +# Windows + +Future support is planned once Trin development is stable. \ No newline at end of file diff --git a/book/src/users/monitoring.md b/book/src/users/monitoring.md new file mode 100644 index 000000000..f0a33b931 --- /dev/null +++ b/book/src/users/monitoring.md @@ -0,0 +1,39 @@ +# Monitoring + +Once Trin is running, the following may be useful + +## Logs + +If errors are encountered, they will be logged to the console in which +Trin was started. + +Be aware that The `RUST_LOG` variable allows for control of what logs are visible. + +- `RUST_LOG=info cargo run -p trin` +- `RUST_LOG=debug cargo run -p trin` + +If started as a systemd service logs will be visible with: +```sh +journalctl -fu .service +``` + +## Disk use + +The following locations are where trin stores data by default: +- Mac Os: `~/Library/Application Support/trin` +- Unix-like: `$HOME/.local/share/trin` +```sh +cd /path/to/data +du -sh +``` + +## CPU and memory use + +`htop` can be used to see the CPU and memory used by trin + +- Ubuntu: `sudo apt install htop` +- Mac Os: `brew install htop` + +```sh +htop +``` diff --git a/book/src/users/problems.md b/book/src/users/problems.md new file mode 100644 index 000000000..039909954 --- /dev/null +++ b/book/src/users/problems.md @@ -0,0 +1,17 @@ +# Problems + +If you encounter a problem, keep in mind that Trin is under active development. +Some issues may be lower on the priority list. + +## Search for more information + +Try searching: +- This book +- The Trin repository issues + +## Document the problem + +If the problem seems new, [raise an issue](https://github.com/ethereum/trin/issues) +in the Trin repository. +Try to record the problem details and include those in the issue. +Include details for how someone else might reproduce the problem you have. 
diff --git a/book/src/users/requirements.md b/book/src/users/requirements.md new file mode 100644 index 000000000..98d3f7974 --- /dev/null +++ b/book/src/users/requirements.md @@ -0,0 +1,25 @@ +# Requirements + +## Hardware + +Suitable: +- Processor: x86 or Arm based. Minimum spec TBD. +- RAM: Minimum TBD. +- Disk: Any. + +Testing and reports of performance on the following are welcome: +- RISC-V based processor. +- Resource-constrained (CPU/RAM) environments. + + +## Software + +- Unix-based operating system +- Rust installation (minimum `v1.66`) + +## Network + +Testing/reports of low-bandwidth networks are welcome. + +Trin should be compatible with VPN use, but if you experience difficulty +connecting to the network we recommend disabling your VPN. \ No newline at end of file diff --git a/book/src/users/startup.md b/book/src/users/startup.md new file mode 100644 index 000000000..faa510536 --- /dev/null +++ b/book/src/users/startup.md @@ -0,0 +1,68 @@ +# Startup + +Configuration occurs at startup via flags: + +```sh +cargo run -p trin -- --flag1 value --flag2 value +``` +Backslashes allow flags to be on new lines for easier reading. +```sh +cargo run -p trin -- \ + --flag1 value \ + --flag2 value \ + --flag3 value1,value2 +``` + +## Flags + +For the most up-to-date flags, run: + +```sh +cargo run -p trin -- --help +``` +### Bootnodes + +To quickly connect to the testnet use the `--bootnodes default` flag. + +### Control disk use + +Trin can be tuned to control how much disk space is used: + +|Selected size|Data access|Network contribution| +|-|-|-| +|Smaller|Slower|Less| +|Larger|Faster|More| + +See the `--kb` flag. + +### Sub-Protocols + +Trin can connect to different sub-protocols to have access to +different types of data. One or more can be selected, but be aware +that not all sub-protocols are ready: + +- Execution State Network +- Execution History Network +- Execution Transaction Gossip Network +- Execution Canonical Indices Network + +### Networking configuration + +Optionally one can specify Trin's network properties: +- What sort of network connections (HTTP vs IPC) +- Port answering Ethereum-related queries +- Port for connecting to other nodes + +These types of flags have defaults. + +### Connect to a full node + +During development of the Portal Network, some parts of the network +are not yet available. A connection to a full (Execution) node allows +Trin to use that node when necessary. + +For example: if the state network is not live, state data requests +to Trin will be forwarded to the full node. + +If a node is not provided, Trin requires a connection to Infura, and will +ask for an Infura key upon startup. See the `--trusted-provider` flag for more. \ No newline at end of file diff --git a/book/src/users/use/README.md b/book/src/users/use/README.md new file mode 100644 index 000000000..91fdb5c62 --- /dev/null +++ b/book/src/users/use/README.md @@ -0,0 +1,39 @@ +# Use + +Once Trin is running, it will be serving Ethereum data in response to requests. +This can be accessed by other programs, such as a wallet in a web browser. + +Another program will then be able to communicate with Trin as it +would with any other Ethereum node. + +Additionally, commands can be made in the terminal to test functionality. +See sections below for more detail. + +## Wallet connection + +Open the wallet and look for options to configure "node/rpc/provider".
+ +Create a custom connection by providing the following: +```sh +http://127.0.0.1:8545 +``` +Which specifies: +- HTTP protocol (rather than IPC) +- Localhost (127.0.0.1), (internal rather than an external address) +- Port (8545 by default) + +Note that Ethereum mainnet has a `ChainID` of `1`. + +## Access from different computer + +If Trin is started on `host` computer by `user`, serving data over HTTP `port` +then the following command can be issued on another computer to send requests to Trin +and receive responses: +```sh +ssh -N -L :127.0.0.1: @ +``` +For example: +```sh +ssh -N -L 8545:127.0.0.1:8545 username@mycomputer +``` +Accessing Trin from another computer using IPC is not covered here. diff --git a/book/src/users/use/ethereum_data.md b/book/src/users/use/ethereum_data.md new file mode 100644 index 000000000..ac2266d86 --- /dev/null +++ b/book/src/users/use/ethereum_data.md @@ -0,0 +1,20 @@ +# Ethereum data + +Trin is designed to eventually serve the JSON-RPC methods that an Ethereum full node would +provide. This includes methods the start with the `eth_` namespace. + +Here is an example of making an `eth_blockNumber` request to a node serving over HTTP to get +the latest block number. +```json +{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id":1} +``` +## HTTP + +```sh +curl -X POST -H "Content-Type: application/json" -d '{"jsonrpc": "2.0", "method": "eth_blockNumber", "params": [], "id":1}' localhost:8545 | jq +``` +## IPC + +```sh +echo '{"jsonrpc":"2.0","method":"eth_blockNumber","params":[],"id":1}' | nc -U /tmp/trin-jsonrpc.ipc | jq +``` diff --git a/book/src/users/use/making_queries.md b/book/src/users/use/making_queries.md new file mode 100644 index 000000000..203d051ef --- /dev/null +++ b/book/src/users/use/making_queries.md @@ -0,0 +1,46 @@ +# Making queries + +If you want to manually query trin, the following patterns can be used, depending on whether +Trin was started with `--web3-transport` as `http` or `ipc`. + +## Query form +A query for JSON-RPC has the following form for a call to `"methodname"` that accepts two +parameters: `parameter_one` and `parameter_two`. + +Query: +```json +{ + "jsonrpc": "2.0", + "method": "", + "params": ["", ""], + "id":1 +} +``` +Usually passed on one line: +```json +{"jsonrpc":"2.0","method":"","params":["", ""],"id":1} +``` + +## HTTP transport + +Command for `query` (above) to HTTP server on `port`: +```sh +curl -X POST -H "Content-Type: application/json" -d '' localhost: | jq +``` +## IPC transport + +Command for `query` (above) to IPC server with socket file located at `/path/to/ipc`: +```sh +echo '' | nc -U | jq +``` + +## Response + +If the data is not in the network the following response is expected: +```json +{ + "jsonrpc": "2.0", + "id": 1, + "result": "0x" +} +``` diff --git a/book/src/users/use/portal_network_data.md b/book/src/users/use/portal_network_data.md new file mode 100644 index 000000000..6acd0cbb7 --- /dev/null +++ b/book/src/users/use/portal_network_data.md @@ -0,0 +1,37 @@ +# Portal network data + +There are methods for requesting data that are specific to: +- Each sub-protocol (history, state, etc.) + - `portal_history*` + - `portal_state*` +- Discovery protocol + - `discv5_*` + +See the Portal Network JSON-RPC specification +[here](https://github.com/ethereum/portal-network-specs/tree/master/jsonrpc) +for a comprehensive and interactive view of specific methods available. + +## Designing a Query +One can identify data by its "content key". 
The following queries ask Trin to speak with +peers, looking for a particular piece of data. + +Let us request the block body for block 16624561 +- Block hash: `0xd27f5e55d88b447788667b3d72cca66b7c944160f68f0a62aaf02aa7e4b2af17` +- Selector for a block body: `0x01` (defined in Portal Network spec under the History sub-protocol). +- Content key: `0x01d27f5e55d88b447788667b3d72cca66b7c944160f68f0a62aaf02aa7e4b2af17` +- Request: `portal_historyRecursiveFindContent`, which accepts a content key as a parameter + +```json +{"jsonrpc":"2.0","method":"portal_historyRecursiveFindContent","params":["0x01d27f5e55d88b447788667b3d72cca66b7c944160f68f0a62aaf02aa7e4b2af17"],"id":1} +``` +## HTTP + +```sh +curl -X POST -H 'Content-Type: application/json' -d '{"jsonrpc":"2.0","method":"portal_historyRecursiveFindContent","params":["0x01d27f5e55d88b447788667b3d72cca66b7c944160f68f0a62aaf02aa7e4b2af17"],"id":1}' http://localhost:8545 | jq +``` + +## IPC + +```sh +echo '{"jsonrpc":"2.0","method":"portal_historyRecursiveFindContent","params":["0x01d27f5e55d88b447788667b3d72cca66b7c944160f68f0a62aaf02aa7e4b2af17"],"id":1}' | nc -U /tmp/trin-jsonrpc.ipc | jq +``` \ No newline at end of file diff --git a/docs/contributing.md b/docs/contributing.md index 0190452d9..60c2b4fb2 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -17,9 +17,9 @@ These guidelines are heavily influenced by the [Snake-Charmer Tactical Manual](h - All logging should be done with the `log` library and not `println!()` statements. - Appropriate log levels (`debug`, `warn`, `info`, etc.) should be used with respect to their content. - Log statements should be declarative, useful, succinct and formatted for readability. - - BAD: `Oct 25 23:42:11.079 DEBUG trin_core::portalnet::events: Got discv5 event TalkRequest(TalkRequest { id: RequestId([226, 151, 109, 239, 115, 223, 116, 109]), node_address: NodeAddress { socket_addr: 127.0.0.1:4568, node_id: NodeId { raw: [5, 208, 240, 167, 153, 116, 216, 224, 160, 101, 80, 229, 154, 206, 113, 239, 182, 109, 181, 137, 16, 96, 251, 63, 85, 223, 235, 208, 3, 242, 175, 11] } }, protocol: [115, 116, 97, 116, 101], body: [1, 1, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 1, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0], sender: Some(UnboundedSender { chan: Tx { inner: Chan { tx: Tx { block_tail: 0x55c4fe611290, tail_position: 1 }, semaphore: 0, rx_waker: AtomicWaker, tx_count: 2, rx_fields: "..." } } }) }) + - BAD: `Oct 25 23:42:11.079 DEBUG trin_core::portalnet::events: Got discv5 event TalkRequest(TalkRequest { id: RequestId([226, 151, 109, 239, 115, 223, 116, 109]), node_address: NodeAddress { socket_addr: 127.0.0.1:4568, node_id: NodeId { raw: [5, 208, 240, 167, 153, 116, 216, 224, 160, 101, 80, 229, 154, 206, 113, 239, 182, 109, 181, 137, 16, 96, 251, 63, 85, 223, 235, 208, 3, 242, 175, 11] } }, protocol: [115, 116, 97, 116, 101], body: [1, 1, 0, 0, 0, 0, 0, 0, 0, 12, 0, 0, 0, 1, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 36, 0, 0, 0, 0, 0, 0, 0], sender: Some(UnboundedSender { chan: Tx { inner: Chan { tx: Tx { block_tail: 0x55c4fe611290, tail_position: 1 }, semaphore: 0, rx_waker: AtomicWaker, tx_count: 2, rx_fields: "..." 
} } }) }) ` - - GOOD: `Oct 25 23:43:02.373 DEBUG trin_core::portalnet::overlay: Received Ping(enr_seq=1, radius=18446744073709551615) + - GOOD: `Oct 25 23:43:02.373 DEBUG trin_core::portalnet::overlay: Received Ping(enr_seq=1, radius=18446744073709551615) ` ## Pull Requests @@ -112,7 +112,7 @@ Every team member is responsible for reviewing code. The designations :speech_ba - a specific concern, without a satisfactory solution in mind - a specific concern with a satisfactory solution provided, but *alternative* solutions **may** be unacceptable - any concern with significant subtleties - + Contributors **should** react to reviews as follows: - :x: if *any* review is marked as "Request changes": - make changes and/or request clarification @@ -183,7 +183,7 @@ still need to add their remote explicitly. ## Releases -- When cutting a new release, the versions of every crate in this repo should be updated simultaneously to the new version. +- When cutting a new release, the versions of every crate in this repo should be updated simultaneously to the new version. ### Versioning @@ -255,3 +255,4 @@ Any datatype of significance **should** have an accompanying comment briefly des - Minimize the amount of `.clone()`s used. Cloning can be a useful mechanism, but should be used with discretion. When leaned upon excessively to [satisfy the borrow checker](https://rust-unofficial.github.io/patterns/anti_patterns/borrow_clone.html) it can lead to unintended consequences. - Use interpolated string formatting when possible. - Do `format!("words: {var:?}")` not `format!("words: {:?}", var)` + diff --git a/newsfragments/584.doc.md b/newsfragments/584.doc.md new file mode 100644 index 000000000..2990c3949 --- /dev/null +++ b/newsfragments/584.doc.md @@ -0,0 +1 @@ +Move user and developer documentation to mdbook diff --git a/trin-cli/README.md b/trin-cli/README.md index 913541018..c85970fd0 100644 --- a/trin-cli/README.md +++ b/trin-cli/README.md @@ -28,3 +28,84 @@ $ cargo run -p trin-cli -- json-rpc discv5_routingTableInfo --ipc /tmp/trin-json ### To use trin-cli to encode content keys: Check out the `Encode Content Keys` section of the [Getting Started docs](../docs/getting_started.md#encode-content-keys). + +### View routing table + +Each Trin client uses a routing table to maintain a record of members in the Portal network with whom it can communicate. At startup, your routing table should be empty (unless you've passed in the bootnode ENR's via the `--bootnodes` CLI param). + +View your routing table: + +```sh +cargo run -p trin-cli -- json-rpc discv5_routingTableInfo +``` + +View ENR information about your own Trin client: + +```sh +cargo run -p trin-cli -- json-rpc discv5_nodeInfo +``` + +### Connect to the Portal Network testnet + +You can send a message from the local node to a bootnode using JSON-RPC, automatically adding the bootnode to your routing table. + +Find a [testnet bootnode ENR](https://github.com/ethereum/portal-network-specs/blob/master/testnet.md). + +Send a `PING` to the node on any of the Portal sub-networks (currently, only history and state are supported in Trin). + +```sh +cargo run -p trin-cli -- json-rpc portal_historyPing --params +``` + +After pinging a bootnode, you should be able to see the messages being sent and received in your node's logs. Now you can check your routing table again, where you should see the pinged bootnode (along with other nodes the bootnode shared with you). Congrats! You're now connected to the Portal Network testnet. 
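+
+As a concrete (hypothetical) session, the ENR below is only a placeholder; substitute one from
+the testnet bootnode list linked above:
+```sh
+# Ping a bootnode on the history network, then confirm it shows up in the routing table.
+BOOTNODE_ENR="enr:-IS4Q..."   # placeholder, not a live node
+cargo run -p trin-cli -- json-rpc portal_historyPing --params "$BOOTNODE_ENR"
+cargo run -p trin-cli -- json-rpc discv5_routingTableInfo
+```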
+
+### Encode Content Keys
+
+Pieces of content (data) on the Portal Network have unique identifiers that we refer to as "content keys". To request a particular piece of content, you will need the corresponding content key.
+
+The encoding for the content key depends on the kind of content that the key refers to.
+
+See available content keys (e.g. block header):
+
+```sh
+cargo run -p trin-cli -- encode-key -h
+```
+
+See arguments for a specific content key:
+
+```sh
+cargo run -p trin-cli -- encode-key block-header -h
+```
+
+Example:
+
+```sh
+$ cargo run -p trin-cli -- encode-key block-body --block-hash 59834fe81c78b1838745e4ac352e455ec23cb542658cbba91a4337759f5bf3fc
+```
+
+### Request Content
+
+Send a `FindContent` message to a Portal Network bootnode.
+
+```sh
+cargo run -p trin-cli -- json-rpc portal_historyFindContent --params <bootnode-enr>,<content-key>
+```
+
+An end-to-end sketch combining `encode-key` and `FindContent` is included at the end of this page.
+
+### Setting up local metrics reporting
+
+1. Install Docker.
+2. Run Prometheus: `docker run -d -p 9090:9090 -v /absolute/path/to/trin/docs/metrics_config:/etc/prometheus prom/prometheus`. Set the correct absolute path to your copy of Trin's `docs/metrics_config/`.
+3. Run Grafana: `docker run -d -p 3000:3000 -e "GF_INSTALL_PLUGINS=yesoreyeram-infinity-datasource" grafana/grafana:latest`.
+4. Start your Trin process with `--enable-metrics-with-url 127.0.0.1:9100 --web3-transport http`.
+    - The `--enable-metrics-with-url` parameter is the address that Trin exports metrics to; it should match the target port that your Prometheus server is configured to scrape at the bottom of `metrics_config/prometheus.yml`.
+    - The `--web3-transport http` flag allows Grafana to request routing table information from Trin via JSON-RPC over HTTP.
+5. From the root of the Trin repo, run `cargo run -p trin-cli -- create-dashboard`. If you used different ports than detailed in the above steps, or you are not using Docker, then this command's defaults will not work. Run the command with the `-h` flag to see how to provide non-default addresses or credentials.
+6. Upon successful dashboard creation, navigate to the dashboard URL that the `create-dashboard` command outputs. Use `admin`/`admin` to log in.
+
+## Gotchas
+
+- If `create-dashboard` fails with an error, the most likely reason is that it has already been run. From within the Grafana UI, delete the "json-rpc" and "prometheus" datasources and the "trin" dashboard and re-run the command.
+
+- There is a limit on concurrent connections given by the threadpool. At last
+  doc update, that number was 2, but will surely change. If you leave
+  connections open, then new connections will block.
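+
+### Example: encode a key, then request it (sketch)
+
+The following is a hypothetical end-to-end session tying the sections above together; the
+bootnode ENR is a placeholder, and the content key to pass is whatever `encode-key` prints:
+```sh
+# 1. Encode the content key for a block body, given its hash.
+cargo run -p trin-cli -- encode-key block-body --block-hash 59834fe81c78b1838745e4ac352e455ec23cb542658cbba91a4337759f5bf3fc
+# 2. Request that content from a bootnode, passing its ENR and the key printed above.
+cargo run -p trin-cli -- json-rpc portal_historyFindContent --params <bootnode-enr>,<content-key>
+```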