diff --git a/.circleci/config.yml b/.circleci/config.yml index b911cafed1..56b1c9d808 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -217,10 +217,12 @@ commands: - restore_libsodium - restore_cache: keys: - - 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}' + - 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}' - restore_cache: keys: - - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-' + - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}' + - 'go-cache-v3-{{ arch }}-{{ .Branch }}-' + - 'go-cache-v3-{{ arch }}-' - run: name: scripts/travis/build.sh --make_debug command: | @@ -233,11 +235,11 @@ commands: scripts/travis/build.sh --make_debug - cache_libsodium - save_cache: - key: 'go-mod-1-14-7-v2-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}' + key: 'go-mod-1.17.9-v3-{{ arch }}-{{ checksum "go.mod" }}-{{ checksum "go.sum" }}' paths: - << parameters.build_dir >>/go/pkg/mod - save_cache: - key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}' + key: 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}' paths: - tmp/go-cache - persist_to_workspace: @@ -257,7 +259,7 @@ commands: mkdir -p tmp find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5 - save_cache: - key: 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}' + key: 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}' paths: - crypto/libs @@ -271,7 +273,7 @@ commands: find crypto/libsodium-fork -type f -exec openssl md5 "{}" + > tmp/libsodium.md5 - restore_cache: keys: - - 'libsodium-fork-v2-{{ .Environment.CIRCLE_STAGE }}-{{ checksum "tmp/libsodium.md5" }}' + - 'libsodium-fork-v2-{{ arch }}-{{ checksum "tmp/libsodium.md5" }}' generic_test: description: Run build tests from build workspace, for re-use by diferent architectures @@ -301,7 +303,9 @@ commands: touch << parameters.result_path >>/<< parameters.result_subdir 
>>/${CIRCLE_NODE_INDEX}/testresults.json - restore_cache: keys: - - 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-' + - 'go-cache-v3-{{ arch }}-{{ .Branch }}-{{ .Revision }}' + - 'go-cache-v3-{{ arch }}-{{ .Branch }}-' + - 'go-cache-v3-{{ arch }}-' - run: name: Run build tests no_output_timeout: << parameters.no_output_timeout >> @@ -333,10 +337,6 @@ commands: root: << parameters.result_path >> paths: - << parameters.result_subdir >> - - save_cache: - key: 'go-cache-v2-{{ .Environment.CIRCLE_STAGE }}-{{ .Environment.CIRCLE_BUILD_NUM }}' - paths: - - tmp/go-cache upload_coverage: description: Collect coverage reports and upload them diff --git a/cmd/goal/account.go b/cmd/goal/account.go index f6e609a5a4..c78ed0872d 100644 --- a/cmd/goal/account.go +++ b/cmd/goal/account.go @@ -25,6 +25,7 @@ import ( "path/filepath" "sort" "strings" + "time" "github.com/spf13/cobra" @@ -825,7 +826,7 @@ var changeOnlineCmd = &cobra.Command{ reportErrorf(err.Error()) } err = changeAccountOnlineStatus( - accountAddress, part, online, statusChangeTxFile, walletName, + accountAddress, online, statusChangeTxFile, walletName, firstTxRound, lastTxRound, transactionFee, scLeaseBytes(cmd), dataDir, client, ) if err != nil { @@ -834,12 +835,16 @@ var changeOnlineCmd = &cobra.Command{ }, } -func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnline bool, txFile string, wallet string, firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte, dataDir string, client libgoal.Client) error { +func changeAccountOnlineStatus( + acct string, goOnline bool, txFile string, wallet string, + firstTxRound, lastTxRound, fee uint64, leaseBytes [32]byte, + dataDir string, client libgoal.Client, +) error { // Generate an unsigned online/offline tx var utx transactions.Transaction var err error if goOnline { - utx, err = client.MakeUnsignedGoOnlineTx(acct, part, firstTxRound, lastTxRound, fee, leaseBytes) + utx, err = client.MakeUnsignedGoOnlineTx(acct, firstTxRound, lastTxRound, fee, 
leaseBytes) } else { utx, err = client.MakeUnsignedGoOfflineTx(acct, firstTxRound, lastTxRound, fee, leaseBytes) } @@ -870,8 +875,8 @@ func changeAccountOnlineStatus(acct string, part *algodAcct.Participation, goOnl var addParticipationKeyCmd = &cobra.Command{ Use: "addpartkey", - Short: "Generate a participation key for the specified account", - Long: `Generate a participation key for the specified account. This participation key can then be used for going online and participating in consensus.`, + Short: "Generate and install participation key for the specified account", + Long: `Generate and install participation key for the specified account. This participation key can then be used for going online and participating in consensus.`, Args: validateNoPosArgsFn, Run: func(cmd *cobra.Command, args []string) { dataDir := ensureSingleDataDir() @@ -886,8 +891,9 @@ var addParticipationKeyCmd = &cobra.Command{ reportInfof("Please stand by while generating keys. This might take a few minutes...") var err error + var part algodAcct.Participation participationGen := func() { - _, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir) + part, _, err = client.GenParticipationKeysTo(accountAddress, roundFirstValid, roundLastValid, keyDilution, partKeyOutDir) } util.RunFuncWithSpinningCursor(participationGen) @@ -895,7 +901,7 @@ var addParticipationKeyCmd = &cobra.Command{ reportErrorf(errorRequestFail, err) } - reportInfof("Participation key generation successful") + reportInfof("Participation key generation successful. 
Participation ID: %s\n", part.ID()) }, } @@ -922,11 +928,22 @@ No --delete-input flag specified, exiting without installing key.`) dataDir := ensureSingleDataDir() client := ensureAlgodClient(dataDir) - _, _, err := client.InstallParticipationKeys(partKeyFile) + addResponse, err := client.AddParticipationKey(partKeyFile) if err != nil { reportErrorf(errorRequestFail, err) } - fmt.Println("Participation key installed successfully") + // In an abundance of caution, check for ourselves that the key has been installed. + if err := client.VerifyParticipationKey(time.Minute, addResponse.PartId); err != nil { + err = fmt.Errorf("unable to verify key installation. Verify key installation with 'goal account partkeyinfo' and delete '%s', or retry the command. Error: %w", partKeyFile, err) + reportErrorf(errorRequestFail, err) + } + + reportInfof("Participation key installed successfully, Participation ID: %s\n", addResponse.PartId) + + // Delete partKeyFile + if err := os.Remove(partKeyFile); err != nil { + reportErrorf("An error occurred while removing the partkey file, please delete it manually: %s", err) + } }, } @@ -957,14 +974,14 @@ var renewParticipationKeyCmd = &cobra.Command{ txRoundLastValid := currentRound + proto.MaxTxnLife // Make sure we don't already have a partkey valid for (or after) specified roundLastValid - parts, err := client.ListParticipationKeyFiles() + parts, err := client.ListParticipationKeys() if err != nil { reportErrorf(errorRequestFail, err) } for _, part := range parts { - if part.Address().String() == accountAddress { - if part.LastValid >= basics.Round(roundLastValid) { - reportErrorf(errExistingPartKey, roundLastValid, part.LastValid) + if part.Address == accountAddress { + if part.Key.VoteLastValid >= roundLastValid { + reportErrorf(errExistingPartKey, roundLastValid, part.Key.VoteLastValid) } } } @@ -982,7 +999,7 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound, var keyPath string var err error genFunc := func() { -
part, keyPath, err = client.GenParticipationKeysTo(address, currentRound, keyLastValidRound, dilution, "") + part, keyPath, err = client.GenParticipationKeys(address, currentRound, keyLastValidRound, dilution) if err != nil { err = fmt.Errorf(errorRequestFail, err) } @@ -997,12 +1014,13 @@ func generateAndRegisterPartKey(address string, currentRound, keyLastValidRound, // Now register it as our new online participation key goOnline := true txFile := "" - err = changeAccountOnlineStatus(address, &part, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client) + err = changeAccountOnlineStatus(address, goOnline, txFile, wallet, currentRound, txLastValidRound, fee, leaseBytes, dataDir, client) if err != nil { os.Remove(keyPath) fmt.Fprintf(os.Stderr, " Error registering keys - deleting newly-generated key file: %s\n", keyPath) } - return err + if err == nil { fmt.Printf("Participation key installed successfully, Participation ID: %s\n", part.ID()) } + return err } var renewAllParticipationKeyCmd = &cobra.Command{ @@ -1025,19 +1043,19 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease client := ensureAlgodClient(dataDir) // Build list of accounts to renew from all accounts with part keys present - parts, err := client.ListParticipationKeyFiles() + parts, err := client.ListParticipationKeys() if err != nil { return fmt.Errorf(errorRequestFail, err) } - renewAccounts := make(map[basics.Address]algodAcct.Participation) + renewAccounts := make(map[string]generatedV2.ParticipationKey) for _, part := range parts { - if existing, has := renewAccounts[part.Address()]; has { - if existing.LastValid >= part.LastValid { + if existing, has := renewAccounts[part.Address]; has { + if existing.Key.VoteLastValid >= part.Key.VoteLastValid { // We already saw a partkey that expires later continue } } - renewAccounts[part.Address()] = part + renewAccounts[part.Address] = part } currentRound, err := client.CurrentRound() @@ -1062,18
+1080,18 @@ func renewPartKeysInDir(dataDir string, lastValidRound uint64, fee uint64, lease // at least through lastValidRound, generate a new key and register it. // Make sure we don't already have a partkey valid for (or after) specified roundLastValid for _, renewPart := range renewAccounts { - if renewPart.LastValid >= basics.Round(lastValidRound) { - fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address(), lastValidRound, renewPart.LastValid) + if renewPart.Key.VoteLastValid >= lastValidRound { + fmt.Printf(" Skipping account %s: Already has a part key valid beyond %d (currently %d)\n", renewPart.Address, lastValidRound, renewPart.Key.VoteLastValid) continue } // If the account's latest partkey expired before the current round, don't automatically renew and instead instruct the user to explicitly renew it. - if renewPart.LastValid < basics.Round(lastValidRound) { - fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address()) + if renewPart.Key.VoteLastValid < lastValidRound { + fmt.Printf(" Skipping account %s: This account has part keys that have expired. Please renew this account explicitly using 'renewpartkey'\n", renewPart.Address) continue } - address := renewPart.Address().String() + address := renewPart.Address err = generateAndRegisterPartKey(address, currentRound, lastValidRound, txLastValidRound, fee, leaseBytes, dilution, wallet, dataDir, client) if err != nil { fmt.Fprintf(os.Stderr, " Error renewing part key for account %s: %v\n", address, err) @@ -1097,53 +1115,6 @@ func uintToStr(number uint64) string { return fmt.Sprintf("%d", number) } -// legacyListParticipationKeysCommand prints key information in the same -// format as earlier versions of goal. Some users are using this information -// in scripts and need some extra time to migrate to the REST API. 
-// DEPRECATED -func legacyListParticipationKeysCommand() { - dataDir := ensureSingleDataDir() - - client := ensureGoalClient(dataDir, libgoal.DynamicClient) - parts, err := client.ListParticipationKeyFiles() - if err != nil { - reportErrorf(errorRequestFail, err) - } - - var filenames []string - for fn := range parts { - filenames = append(filenames, fn) - } - sort.Strings(filenames) - - rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n" - fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key") - for _, fn := range filenames { - onlineInfoStr := "unknown" - onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress()) - if err == nil { - votingBytes := parts[fn].Voting.OneTimeSignatureVerifier - vrfBytes := parts[fn].VRF.PK - if onlineAccountInfo.Participation != nil && - (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) && - (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) && - (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) && - (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) && - (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) { - onlineInfoStr = "yes" - } else { - onlineInfoStr = "no" - } - } - // it's okay to proceed without algod info - first, last := parts[fn].ValidInterval() - fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(), - fmt.Sprintf("%d", first), - fmt.Sprintf("%d", last), - fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset)) - } -} - var listParticipationKeysCmd = &cobra.Command{ Use: "listpartkeys", Short: "List participation keys summary", @@ -1396,47 +1367,6 @@ func strOrNA(value *uint64) string { return uintToStr(*value) } -// legacyPartkeyInfoCommand prints key information in the same -// format as earlier versions of goal. 
Some users are using this information -// in scripts and need some extra time to migrate to alternatives. -// DEPRECATED -func legacyPartkeyInfoCommand() { - type partkeyInfo struct { - _struct struct{} `codec:",omitempty,omitemptyarray"` - Address string `codec:"acct"` - FirstValid basics.Round `codec:"first"` - LastValid basics.Round `codec:"last"` - VoteID crypto.OneTimeSignatureVerifier `codec:"vote"` - SelectionID crypto.VRFVerifier `codec:"sel"` - VoteKeyDilution uint64 `codec:"voteKD"` - } - - onDataDirs(func(dataDir string) { - fmt.Printf("Dumping participation key info from %s...\n", dataDir) - client := ensureGoalClient(dataDir, libgoal.DynamicClient) - - // Make sure we don't already have a partkey valid for (or after) specified roundLastValid - parts, err := client.ListParticipationKeyFiles() - if err != nil { - reportErrorf(errorRequestFail, err) - } - - for filename, part := range parts { - fmt.Println("------------------------------------------------------------------") - info := partkeyInfo{ - Address: part.Address().String(), - FirstValid: part.FirstValid, - LastValid: part.LastValid, - VoteID: part.VotingSecrets().OneTimeSignatureVerifier, - SelectionID: part.VRFSecrets().PK, - VoteKeyDilution: part.KeyDilution, - } - infoString := protocol.EncodeJSON(&info) - fmt.Printf("File: %s\n%s\n", filename, string(infoString)) - } - }) -} - var partkeyInfoCmd = &cobra.Command{ Use: "partkeyinfo", Short: "Output details about all available part keys", @@ -1528,3 +1458,138 @@ var markNonparticipatingCmd = &cobra.Command{ } }, } + +// listParticipationKeyFiles returns the available participation keys, +// as a map from database filename to Participation key object. 
+// DEPRECATED +func listParticipationKeyFiles(c *libgoal.Client) (partKeyFiles map[string]algodAcct.Participation, err error) { + genID, err := c.GenesisID() + if err != nil { + return + } + + // Get a list of files in the participation keys directory + keyDir := filepath.Join(c.DataDir(), genID) + files, err := ioutil.ReadDir(keyDir) + if err != nil { + return + } + + partKeyFiles = make(map[string]algodAcct.Participation) + for _, file := range files { + // If it can't be a participation key database, skip it + if !config.IsPartKeyFilename(file.Name()) { + continue + } + + filename := file.Name() + + // Fetch a handle to this database + handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename)) + if err != nil { + // Couldn't open it, skip it + continue + } + + // Fetch an account.Participation from the database + part, err := algodAcct.RestoreParticipation(handle) + if err != nil { + // Couldn't read it, skip it + handle.Close() + continue + } + + partKeyFiles[filename] = part.Participation + part.Close() + } + + return +} + +// legacyListParticipationKeysCommand prints key information in the same +// format as earlier versions of goal. Some users are using this information +// in scripts and need some extra time to migrate to the REST API. 
+// DEPRECATED +func legacyListParticipationKeysCommand() { + dataDir := ensureSingleDataDir() + + client := ensureGoalClient(dataDir, libgoal.DynamicClient) + parts, err := listParticipationKeyFiles(&client) + if err != nil { + reportErrorf(errorRequestFail, err) + } + + var filenames []string + for fn := range parts { + filenames = append(filenames, fn) + } + sort.Strings(filenames) + + rowFormat := "%-10s\t%-80s\t%-60s\t%12s\t%12s\t%12s\n" + fmt.Printf(rowFormat, "Registered", "Filename", "Parent address", "First round", "Last round", "First key") + for _, fn := range filenames { + onlineInfoStr := "unknown" + onlineAccountInfo, err := client.AccountInformation(parts[fn].Address().GetUserAddress()) + if err == nil { + votingBytes := parts[fn].Voting.OneTimeSignatureVerifier + vrfBytes := parts[fn].VRF.PK + if onlineAccountInfo.Participation != nil && + (string(onlineAccountInfo.Participation.ParticipationPK) == string(votingBytes[:])) && + (string(onlineAccountInfo.Participation.VRFPK) == string(vrfBytes[:])) && + (onlineAccountInfo.Participation.VoteFirst == uint64(parts[fn].FirstValid)) && + (onlineAccountInfo.Participation.VoteLast == uint64(parts[fn].LastValid)) && + (onlineAccountInfo.Participation.VoteKeyDilution == parts[fn].KeyDilution) { + onlineInfoStr = "yes" + } else { + onlineInfoStr = "no" + } + } + // it's okay to proceed without algod info + first, last := parts[fn].ValidInterval() + fmt.Printf(rowFormat, onlineInfoStr, fn, parts[fn].Address().GetUserAddress(), + fmt.Sprintf("%d", first), + fmt.Sprintf("%d", last), + fmt.Sprintf("%d.%d", parts[fn].Voting.FirstBatch, parts[fn].Voting.FirstOffset)) + } +} + +// legacyPartkeyInfoCommand prints key information in the same +// format as earlier versions of goal. Some users are using this information +// in scripts and need some extra time to migrate to alternatives. 
+// DEPRECATED +func legacyPartkeyInfoCommand() { + type partkeyInfo struct { + _struct struct{} `codec:",omitempty,omitemptyarray"` + Address string `codec:"acct"` + FirstValid basics.Round `codec:"first"` + LastValid basics.Round `codec:"last"` + VoteID crypto.OneTimeSignatureVerifier `codec:"vote"` + SelectionID crypto.VRFVerifier `codec:"sel"` + VoteKeyDilution uint64 `codec:"voteKD"` + } + + onDataDirs(func(dataDir string) { + fmt.Printf("Dumping participation key info from %s...\n", dataDir) + client := ensureGoalClient(dataDir, libgoal.DynamicClient) + + // Make sure we don't already have a partkey valid for (or after) specified roundLastValid + parts, err := listParticipationKeyFiles(&client) + if err != nil { + reportErrorf(errorRequestFail, err) + } + + for filename, part := range parts { + fmt.Println(strings.Repeat("-", 40)) + info := partkeyInfo{ + Address: part.Address().String(), + FirstValid: part.FirstValid, + LastValid: part.LastValid, + VoteID: part.VotingSecrets().OneTimeSignatureVerifier, + SelectionID: part.VRFSecrets().PK, + VoteKeyDilution: part.KeyDilution, + } + infoString := protocol.EncodeJSON(&info) + fmt.Printf("File: %s\n%s\n", filename, string(infoString)) + } + }) +} diff --git a/cmd/pingpong/runCmd.go b/cmd/pingpong/runCmd.go index cb62840360..21bd8c1aa8 100644 --- a/cmd/pingpong/runCmd.go +++ b/cmd/pingpong/runCmd.go @@ -43,9 +43,7 @@ var minFee uint64 var randomFee, noRandomFee bool var randomAmount, noRandomAmount bool var randomDst bool -var delayBetween string var runTime string -var restTime string var refreshTime string var saveConfig bool var useDefault bool @@ -84,9 +82,7 @@ func init() { runCmd.Flags().BoolVar(&randomFee, "rf", false, "Set to enable random fees (between minf and mf)") runCmd.Flags().BoolVar(&noRandomFee, "nrf", false, "Set to disable random fees") runCmd.Flags().BoolVar(&randomDst, "rd", false, "Send money to randomly-generated addresses") - runCmd.Flags().StringVar(&delayBetween, "delay", "", "Delay (ms) 
between every transaction (0 means none)") runCmd.Flags().StringVar(&runTime, "run", "", "Duration of time (seconds) to run transfers before resting (0 means non-stop)") - runCmd.Flags().StringVar(&restTime, "rest", "", "Duration of time (seconds) to rest between transfer periods (0 means no rest)") runCmd.Flags().StringVar(&refreshTime, "refresh", "", "Duration of time (seconds) between refilling accounts with money (0 means no refresh)") runCmd.Flags().StringVar(&logicProg, "program", "", "File containing the compiled program to include as a logic sig") runCmd.Flags().BoolVar(&saveConfig, "save", false, "Save the effective configuration to disk") @@ -187,13 +183,6 @@ var runCmd = &cobra.Command{ } cfg.RandomizeDst = randomDst cfg.Quiet = quietish - if delayBetween != "" { - val, err := strconv.ParseUint(delayBetween, 10, 32) - if err != nil { - reportErrorf("Invalid value specified for --delay: %v\n", err) - } - cfg.DelayBetweenTxn = time.Duration(uint32(val)) * time.Millisecond - } if runTime != "" { val, err := strconv.ParseUint(runTime, 10, 32) if err != nil { @@ -201,13 +190,6 @@ var runCmd = &cobra.Command{ } cfg.RunTime = time.Duration(uint32(val)) * time.Second } - if restTime != "" { - val, err := strconv.ParseUint(restTime, 10, 32) - if err != nil { - reportErrorf("Invalid value specified for --rest: %v\n", err) - } - cfg.RestTime = time.Duration(uint32(val)) * time.Second - } if refreshTime != "" { val, err := strconv.ParseUint(refreshTime, 10, 32) if err != nil { diff --git a/config/consensus.go b/config/consensus.go index 3631270de3..d54b1a7fe6 100644 --- a/config/consensus.go +++ b/config/consensus.go @@ -910,6 +910,9 @@ func initConsensusProtocols() { // Enable application support v24.Application = true + // Although Inners were not allowed yet, this gates downgrade checks, which must be allowed + v24.MinInnerApplVersion = 6 + // Enable rekeying v24.SupportRekeying = true @@ -1090,7 +1093,6 @@ func initConsensusProtocols() { v31.LogicSigVersion = 6 
v31.EnableInnerTransactionPooling = true v31.IsolateClearState = true - v31.MinInnerApplVersion = 6 // stat proof key registration v31.EnableStateProofKeyregCheck = true diff --git a/data/transactions/logic/TEAL_opcodes.md b/data/transactions/logic/TEAL_opcodes.md index de7c8d8bdd..ca35545721 100644 --- a/data/transactions/logic/TEAL_opcodes.md +++ b/data/transactions/logic/TEAL_opcodes.md @@ -751,7 +751,7 @@ When A is a uint64, index 0 is the least significant bit. Setting bit 3 to 1 on - Opcode: 0x5c {uint8 encoding index} - Stack: ..., A: []byte → ..., []byte - decode A which was base64-encoded using _encoding_ E. Fail if A is not base64 encoded with encoding E -- **Cost**: 1 + 1 per 16 bytes +- **Cost**: 1 + 1 per 16 bytes of A - Availability: v7 `base64` Encodings: @@ -769,6 +769,7 @@ Decodes A using the base64 encoding E. Specify the encoding with an immediate ar - Opcode: 0x5d {string return type} - Stack: ..., A: []byte, B: []byte → ..., any - return key B's value from a [valid](jsonspec.md) utf-8 encoded json object A +- **Cost**: 25 + 2 per 7 bytes of A - Availability: v7 `json_ref` Types: diff --git a/data/transactions/logic/doc.go b/data/transactions/logic/doc.go index a8badee03f..6ebd98478e 100644 --- a/data/transactions/logic/doc.go +++ b/data/transactions/logic/doc.go @@ -356,7 +356,8 @@ func OpAllCosts(opName string) []VerCost { if !ok { continue } - cost := spec.OpDetails.docCost() + argLen := len(spec.Arg.Types) + cost := spec.OpDetails.docCost(argLen) if costs == nil || cost != costs[len(costs)-1].Cost { costs = append(costs, VerCost{v, v, cost}) } else { diff --git a/data/transactions/logic/eval.go b/data/transactions/logic/eval.go index f55b61bc93..9ddb422f42 100644 --- a/data/transactions/logic/eval.go +++ b/data/transactions/logic/eval.go @@ -1033,13 +1033,13 @@ func (cx *EvalContext) step() error { return nil } -// oneBlank is a boring stack provided to deets.Cost during checkStep. 
It is +// blankStack is a boring stack provided to deets.Cost during checkStep. It is // good enough to allow Cost() to not crash. It would be incorrect to provide // this stack if there were linear cost opcodes before backBranchEnabledVersion, // because the static cost would be wrong. But then again, a static cost model // wouldn't work before backBranchEnabledVersion, so such an opcode is already // unacceptable. TestLinearOpcodes ensures. -var oneBlank = []stackValue{{Bytes: []byte{}}} +var blankStack = make([]stackValue, 5) func (cx *EvalContext) checkStep() (int, error) { cx.instructionStarts[cx.pc] = true @@ -1055,7 +1055,7 @@ func (cx *EvalContext) checkStep() (int, error) { if deets.Size != 0 && (cx.pc+deets.Size > len(cx.program)) { return 0, fmt.Errorf("%s program ends short of immediate values", spec.Name) } - opcost := deets.Cost(cx.program, cx.pc, oneBlank) + opcost := deets.Cost(cx.program, cx.pc, blankStack) if opcost <= 0 { return 0, fmt.Errorf("%s reported non-positive cost", spec.Name) } @@ -4764,56 +4764,52 @@ func opBase64Decode(cx *EvalContext) error { cx.stack[last].Bytes = bytes return nil } -func hasDuplicateKeys(jsonText []byte) (bool, map[string]json.RawMessage, error) { + +func isPrimitiveJSON(jsonText []byte) (bool, error) { dec := json.NewDecoder(bytes.NewReader(jsonText)) - parsed := make(map[string]json.RawMessage) t, err := dec.Token() if err != nil { - return false, nil, err + return false, err } t, ok := t.(json.Delim) if !ok || t.(json.Delim).String() != "{" { - return false, nil, fmt.Errorf("only json object is allowed") - } - for dec.More() { - var value json.RawMessage - // get JSON key - key, err := dec.Token() - if err != nil { - return false, nil, err - } - // end of json - if key == '}' { - break - } - // decode value - err = dec.Decode(&value) - if err != nil { - return false, nil, err - } - // check for duplicates - if _, ok := parsed[key.(string)]; ok { - return true, nil, nil - } - parsed[key.(string)] = value + return 
true, nil } - return false, parsed, nil + return false, nil } func parseJSON(jsonText []byte) (map[string]json.RawMessage, error) { - if !json.Valid(jsonText) { + // parse JSON with Algorand's standard JSON library + var parsed map[interface{}]json.RawMessage + err := protocol.DecodeJSON(jsonText, &parsed) + + if err != nil { + // if the error was caused by duplicate keys + if strings.Contains(err.Error(), "cannot decode into a non-pointer value") { + return nil, fmt.Errorf("invalid json text, duplicate keys not allowed") + } + + // if the error was caused by non-json object + if strings.Contains(err.Error(), "read map - expect char '{' but got char") { + return nil, fmt.Errorf("invalid json text, only json object is allowed") + } + return nil, fmt.Errorf("invalid json text") } - // parse json text and check for duplicate keys - hasDuplicates, parsed, err := hasDuplicateKeys(jsonText) - if hasDuplicates { - return nil, fmt.Errorf("invalid json text, duplicate keys not allowed") - } - if err != nil { - return nil, fmt.Errorf("invalid json text, %v", err) + + // check whether any keys are not strings + stringMap := make(map[string]json.RawMessage) + for k, v := range parsed { + key, ok := k.(string) + if !ok { + return nil, fmt.Errorf("invalid json text") + } + stringMap[key] = v } - return parsed, nil + + return stringMap, nil } + func opJSONRef(cx *EvalContext) error { // get json key last := len(cx.stack) - 1 @@ -4837,6 +4833,17 @@ func opJSONRef(cx *EvalContext) error { var stval stackValue _, ok = parsed[key] if !ok { + // if the key is not found, first check whether the JSON text is the null value + // by checking whether it is a primitive JSON value. Any other primitive + // (or array) would have thrown an error previously during `parseJSON`. 
+ isPrimitive, err := isPrimitiveJSON(cx.stack[last].Bytes) + if err == nil && isPrimitive { + err = fmt.Errorf("invalid json text, only json object is allowed") + } + if err != nil { + return fmt.Errorf("error while parsing JSON text, %v", err) + } + return fmt.Errorf("key %s not found in JSON text", key) } diff --git a/data/transactions/logic/eval_test.go b/data/transactions/logic/eval_test.go index 9e54e7b291..aafa4e44e3 100644 --- a/data/transactions/logic/eval_test.go +++ b/data/transactions/logic/eval_test.go @@ -20,6 +20,7 @@ import ( "encoding/base64" "encoding/binary" "encoding/hex" + "encoding/json" "fmt" "strconv" "strings" @@ -114,7 +115,7 @@ func benchmarkEvalParams(txn *transactions.SignedTxn) *EvalParams { ep := defaultEvalParamsWithVersion(txn, LogicVersion) ep.Trace = nil // Tracing would slow down benchmarks clone := *ep.Proto - bigBudget := 1000 * 1000 // Allow long run times + bigBudget := 2 * 1000 * 1000 // Allow long run times clone.LogicSigMaxCost = uint64(bigBudget) clone.MaxAppProgramCost = bigBudget ep.Proto = &clone @@ -260,6 +261,22 @@ func TestWrongProtoVersion(t *testing.T) { } } +func TestBlankStackSufficient(t *testing.T) { + partitiontest.PartitionTest(t) + + t.Parallel() + for v := 0; v <= LogicVersion; v++ { + t.Run(fmt.Sprintf("v=%d", v), func(t *testing.T) { + for i := 0; i < 256; i++ { + spec := opsByOpcode[v][i] + argLen := len(spec.Arg.Types) + blankStackLen := len(blankStack) + require.GreaterOrEqual(t, blankStackLen, argLen) + } + }) + } +} + func TestSimpleMath(t *testing.T) { partitiontest.PartitionTest(t) @@ -3564,9 +3581,89 @@ func BenchmarkCheckx5(b *testing.B) { } } +func makeNestedKeys(depth int) string { + if depth <= 0 { + return `{\"key0\":\"value0\"}` + } + return fmt.Sprintf(`{\"key0\":%s}`, makeNestedKeys(depth-1)) +} + +func BenchmarkJsonRef(b *testing.B) { + // base case + oneKey := `{\"key0\":\"value0\"}` + + // many keys + sb := &strings.Builder{} + sb.WriteString(`{`) + for i := 0; i < 100; i++ { + 
sb.WriteString(fmt.Sprintf(`\"key%d\":\"value%d\",`, i, i)) + } + sb.WriteString(`\"key100\":\"value100\"}`) // so there is no trailing comma + manyKeys := sb.String() + + lenOfManyKeys := len(manyKeys) + longTextLen := lenOfManyKeys - 36 // subtract the difference + mediumText := strings.Repeat("a", longTextLen/2) + longText := strings.Repeat("a", longTextLen) + + // medium key + mediumKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, mediumText) + + // long key + longKey := fmt.Sprintf(`{\"%s\":\"value\",\"key1\":\"value2\"}`, longText) + + // long value + longValue := fmt.Sprintf(`{\"key0\":\"%s\",\"key1\":\"value2\"}`, longText) + + // nested keys + nestedKeys := makeNestedKeys(200) + + jsonLabels := []string{"one key", "many keys", "medium key", "long key", "long val", "nested keys"} + jsonSamples := []string{oneKey, manyKeys, mediumKey, longKey, longValue, nestedKeys} + keys := [][]string{ + {"key0"}, + {"key0", "key100"}, + {mediumText, "key1"}, + {longText, "key1"}, + {"key0", "key1"}, + {"key0"}, + } + valueFmt := [][]string{ + {"JSONString"}, + {"JSONString", "JSONString"}, + {"JSONString", "JSONString"}, + {"JSONString", "JSONString"}, + {"JSONString", "JSONString"}, + {"JSONObject"}, + } + benches := [][]string{} + for i, label := range jsonLabels { + for j, key := range keys[i] { + prog := fmt.Sprintf(`byte "%s"; byte "%s"; json_ref %s; pop;`, jsonSamples[i], key, valueFmt[i][j]) + + // indicate long key + keyLabel := key + if len(key) > 50 { + keyLabel = fmt.Sprintf("long_key_%d", len(key)) + } + + benches = append(benches, []string{ + fmt.Sprintf("%s_%s", label, keyLabel), // label + "", // prefix + prog, // operation + "int 1", // suffix + }) + } + } + for _, bench := range benches { + b.Run(bench[0], func(b *testing.B) { + benchmarkOperation(b, bench[1], bench[2], bench[3]) + }) + } +} + func TestEvalVersions(t *testing.T) { partitiontest.PartitionTest(t) - t.Parallel() text := `intcblock 1 @@ -4780,42 +4877,55 @@ int ` + 
fmt.Sprintf("%d", 20_000-3-6) + ` // base64_decode cost = 6 (68 bytes -> testAccepts(t, source, fidoVersion) } -func TestHasDuplicateKeys(t *testing.T) { +func TestIsPrimitive(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() testCases := []struct { text []byte }{ { - text: []byte(`{"key0": "1","key0": "2", "key1":1}`), + text: []byte(`null`), + }, + { + text: []byte(`[1, 2, 3]`), }, { - text: []byte(`{"key0": "1","key1": [1], "key0":{"key2": "a"}}`), + text: []byte(`2`), }, } for _, s := range testCases { - hasDuplicates, _, err := hasDuplicateKeys(s.text) + isPrimitive, err := isPrimitiveJSON(s.text) require.Nil(t, err) - require.True(t, hasDuplicates) + require.True(t, isPrimitive) } - noDuplicates := []struct { + notPrimitive := []struct { text []byte }{ { text: []byte(`{"key0": "1","key1": "2", "key2":3}`), }, { - text: []byte(`{"key0": "1","key1": [{"key0":1,"key0":2},{"key0":1,"key0":2}], "key2":{"key5": "a","key5": "b"}}`), + text: []byte(`{}`), }, } - for _, s := range noDuplicates { - hasDuplicates, _, err := hasDuplicateKeys(s.text) + for _, s := range notPrimitive { + primitive, err := isPrimitiveJSON(s.text) require.Nil(t, err) - require.False(t, hasDuplicates) + require.False(t, primitive) } } +func TestProtocolParseDuplicateErrMsg(t *testing.T) { + partitiontest.PartitionTest(t) + t.Parallel() + text := `{"key0": "algo", "key0": "algo"}` + var parsed map[string]json.RawMessage + err := protocol.DecodeJSON([]byte(text), &parsed) + require.Contains(t, err.Error(), "cannot decode into a non-pointer value") + require.Error(t, err) +} + func TestOpJSONRef(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() @@ -4953,6 +5063,33 @@ func TestOpJSONRef(t *testing.T) { ==`, previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, }, + // JavaScript MAX_SAFE_INTEGER + { + source: `byte "{\"maxSafeInt\": 9007199254740991}"; + byte "maxSafeInt"; + json_ref JSONUint64; + int 9007199254740991; + ==`, + previousVersErrors: []Expect{{5, 
"unknown opcode: json_ref"}}, + }, + // maximum uint64 + { + source: `byte "{\"maxUint64\": 18446744073709551615}"; + byte "maxUint64"; + json_ref JSONUint64; + int 18446744073709551615; + ==`, + previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, + }, + // larger-than-uint64s are allowed if not requested + { + source: `byte "{\"maxUint64\": 18446744073709551616, \"smallUint64\": 0}"; + byte "smallUint64"; + json_ref JSONUint64; + int 0; + ==`, + previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, + }, } for _, s := range testCases { @@ -4978,6 +5115,9 @@ func TestOpJSONRef(t *testing.T) { pass, _, err := EvalContract(ops.Program, 0, 888, ep) require.NoError(t, err) require.True(t, pass) + + // reset pooled budget for new "app call" + *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost } failedCases := []struct { @@ -5096,11 +5236,11 @@ func TestOpJSONRef(t *testing.T) { json_ref JSONObject; byte "key4"; json_ref JSONObject; - byte "key40" + byte "key40"; json_ref JSONString `, error: "error while parsing JSON text, invalid json text, duplicate keys not allowed", - previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {12, "unknown opcode: json_ref"}}, + previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}, {9, "unknown opcode: json_ref"}, {13, "unknown opcode: json_ref"}}, }, { source: `byte "[1,2,3]"; @@ -5142,6 +5282,25 @@ func TestOpJSONRef(t *testing.T) { error: "error while parsing JSON text, invalid json text, only json object is allowed", previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, }, + { + source: `byte "{noquotes: \"shouldn't work\"}"; + byte "noquotes"; + json_ref JSONString; + byte "shouldn't work"; + ==`, + error: "error while parsing JSON text, invalid json text", + previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, + }, + // max uint64 + 1 should fail + { + source: `byte "{\"tooBig\": 18446744073709551616}"; + byte "tooBig"; + json_ref 
JSONUint64; + int 1; + return`, + error: "json: cannot unmarshal number 18446744073709551616 into Go value of type uint64", + previousVersErrors: []Expect{{5, "unknown opcode: json_ref"}}, + }, } for _, s := range failedCases { @@ -5170,6 +5329,9 @@ func TestOpJSONRef(t *testing.T) { require.False(t, pass) require.Error(t, err) require.EqualError(t, err, s.error) + + // reset pooled budget for new "app call" + *ep.PooledApplicationBudget = ep.Proto.MaxAppProgramCost } } diff --git a/data/transactions/logic/jsonspec.md b/data/transactions/logic/jsonspec.md index e747e40f5e..817c01ece4 100644 --- a/data/transactions/logic/jsonspec.md +++ b/data/transactions/logic/jsonspec.md @@ -12,7 +12,7 @@ Additional specifications used by **json_ref** that are extensions to the RFC715 - The byte order mark (BOM), "\uFEFF", is not allowed at the beginning of a JSON text - Raw non-unicode characters not accepted -## Invalid JSON text +### Invalid JSON text ```json \uFEFF{"key0": 1} @@ -104,10 +104,6 @@ Comment blocks are not accepted. 
{"key0": /*comment*/"algo"} ``` -```json -{"key0": "algo"}/*comment*/ -``` - ```json {"key0": [1,/*comment*/,3]} ``` diff --git a/data/transactions/logic/jsonspec_test.go b/data/transactions/logic/jsonspec_test.go index 7ab173a0f3..3ebe131e8a 100644 --- a/data/transactions/logic/jsonspec_test.go +++ b/data/transactions/logic/jsonspec_test.go @@ -58,9 +58,6 @@ func TestParseComments(t *testing.T) { text := `{"key0": /*comment*/"algo"}` _, err := parseJSON([]byte(text)) require.Error(t, err) - text = `{"key0": "algo"}/*comment*/` - _, err = parseJSON([]byte(text)) - require.Error(t, err) text = `{"key0": [1,/*comment*/,3]}` _, err = parseJSON([]byte(text)) require.Error(t, err) @@ -210,7 +207,6 @@ func TestParseKeys(t *testing.T) { text = `{1: 1}` _, err = parseJSON([]byte(text)) require.Error(t, err) - } func TestParseFileEncoding(t *testing.T) { diff --git a/data/transactions/logic/opcodes.go b/data/transactions/logic/opcodes.go index 1d26d437a0..c7c9cd356b 100644 --- a/data/transactions/logic/opcodes.go +++ b/data/transactions/logic/opcodes.go @@ -72,6 +72,7 @@ type linearCost struct { baseCost int chunkCost int chunkSize int + depth int } // divideCeilUnsafely provides `math.Ceil` semantics using integer division. The technique avoids slower floating point operations as suggested in https://stackoverflow.com/a/2745086. @@ -84,22 +85,24 @@ func (lc *linearCost) compute(stack []stackValue) int { cost := lc.baseCost if lc.chunkCost != 0 && lc.chunkSize != 0 { // Uses divideCeilUnsafely rather than (len/size) to match how Ethereum discretizes hashing costs. 
- cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1].Bytes), lc.chunkSize) + cost += divideCeilUnsafely(lc.chunkCost*len(stack[len(stack)-1-lc.depth].Bytes), lc.chunkSize) } return cost } -func (lc *linearCost) docCost() string { +func (lc *linearCost) docCost(argLen int) string { if *lc == (linearCost{}) { return "" } if lc.chunkCost == 0 { return strconv.Itoa(lc.baseCost) } + idxFromStart := argLen - lc.depth - 1 + stackArg := rune(int('A') + idxFromStart) if lc.chunkSize == 1 { - return fmt.Sprintf("%d + %d per byte", lc.baseCost, lc.chunkCost) + return fmt.Sprintf("%d + %d per byte of %c", lc.baseCost, lc.chunkCost, stackArg) } - return fmt.Sprintf("%d + %d per %d bytes", lc.baseCost, lc.chunkCost, lc.chunkSize) + return fmt.Sprintf("%d + %d per %d bytes of %c", lc.baseCost, lc.chunkCost, lc.chunkSize, stackArg) } // OpDetails records details such as non-standard costs, immediate arguments, or @@ -118,8 +121,8 @@ type OpDetails struct { Immediates []immediate // details of each immediate arg to opcode } -func (d *OpDetails) docCost() string { - cost := d.FullCost.docCost() +func (d *OpDetails) docCost(argLen int) string { + cost := d.FullCost.docCost(argLen) if cost != "" { return cost } @@ -147,7 +150,7 @@ func (d *OpDetails) docCost() string { // both static (the program, which can be used to find the immediate values // supplied), and dynamic (the stack, which can be used to find the run-time // arguments supplied). Cost is used at run-time. docCost returns similar -// information in human-reable form. +// information in human-readable form. 
func (d *OpDetails) Cost(program []byte, pc int, stack []stackValue) int { cost := d.FullCost.compute(stack) if cost != 0 { @@ -214,9 +217,9 @@ func (d OpDetails) only(m runMode) OpDetails { return clone } -func (d OpDetails) costByLength(initial, perChunk, chunkSize int) OpDetails { +func (d OpDetails) costByLength(initial, perChunk, chunkSize, depth int) OpDetails { clone := d - clone.FullCost = costByLength(initial, perChunk, chunkSize).FullCost + clone.FullCost = costByLength(initial, perChunk, chunkSize, depth).FullCost return clone } @@ -263,12 +266,12 @@ func costByField(immediate string, group *FieldGroup, costs []int) OpDetails { return opd } -func costByLength(initial int, perChunk int, chunkSize int) OpDetails { +func costByLength(initial, perChunk, chunkSize, depth int) OpDetails { if initial < 1 || perChunk <= 0 || chunkSize < 1 || chunkSize > maxStringSize { panic("bad cost configuration") } d := opDefault() - d.FullCost = linearCost{initial, perChunk, chunkSize} + d.FullCost = linearCost{initial, perChunk, chunkSize, depth} return d } @@ -490,8 +493,8 @@ var OpSpecs = []OpSpec{ {0x59, "extract_uint16", opExtract16Bits, proto("bi:i"), 5, opDefault()}, {0x5a, "extract_uint32", opExtract32Bits, proto("bi:i"), 5, opDefault()}, {0x5b, "extract_uint64", opExtract64Bits, proto("bi:i"), 5, opDefault()}, - {0x5c, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16)}, - {0x5d, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes)}, + {0x5c, "base64_decode", opBase64Decode, proto("b:b"), fidoVersion, field("e", &Base64Encodings).costByLength(1, 1, 16, 0)}, + {0x5d, "json_ref", opJSONRef, proto("bb:a"), fidoVersion, field("r", &JSONRefTypes).costByLength(25, 2, 7, 1)}, {0x60, "balance", opBalance, proto("i:i"), 2, only(modeApp)}, {0x60, "balance", opBalance, proto("a:i"), directRefEnabledVersion, only(modeApp)}, diff --git a/data/transactions/transaction.go 
b/data/transactions/transaction.go index 5c67f64dd2..1c63c0c8a4 100644 --- a/data/transactions/transaction.go +++ b/data/transactions/transaction.go @@ -726,9 +726,9 @@ func ProgramVersion(bytecode []byte) (version uint64, length int, err error) { // matching versions between approval and clearstate. const syncProgramsVersion = 6 -// CheckContractVersions ensures that for v6 and higher two programs are version -// matched, and that they are not a downgrade. If proto.AllowV4InnerAppls, then -// no downgrades are allowed, regardless of version. +// CheckContractVersions ensures that for syncProgramsVersion and higher, two programs are version +// matched, and that they are not a downgrade. If either program version is +// >= proto.MinInnerApplVersion, downgrade of that program is not allowed. func CheckContractVersions(approval []byte, clear []byte, previous basics.AppParams, proto *config.ConsensusParams) error { av, _, err := ProgramVersion(approval) if err != nil { diff --git a/ledger/accountdb_test.go b/ledger/accountdb_test.go index b771d1532f..ca4432b6a0 100644 --- a/ledger/accountdb_test.go +++ b/ledger/accountdb_test.go @@ -237,7 +237,7 @@ func TestAccountDBRound(t *testing.T) { numElementsPerSegment := 10 // lastCreatableID stores asset or app max used index to get rid of conflicts - lastCreatableID := crypto.RandUint64() % 512 + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) ctbsList, randomCtbs := randomCreatables(numElementsPerSegment) expectedDbImage := make(map[basics.CreatableIndex]ledgercore.ModifiedCreatable) var baseAccounts lruAccounts @@ -247,7 +247,7 @@ func TestAccountDBRound(t *testing.T) { baseResources.init(nil, 100, 80) for i := 1; i < 10; i++ { var updates ledgercore.AccountDeltas - updates, newacctsTotals, _, lastCreatableID = ledgertesting.RandomDeltasFull(20, accts, 0, lastCreatableID) + updates, newacctsTotals, _ = ledgertesting.RandomDeltasFull(20, accts, 0, &lastCreatableID) totals = 
ledgertesting.CalculateNewRoundAccountTotals(t, updates, 0, proto, accts, totals) accts = applyPartialDeltas(accts, updates) ctbsWithDeletes := randomCreatableSampling(i, ctbsList, randomCtbs, diff --git a/ledger/acctupdates_test.go b/ledger/acctupdates_test.go index d2faf6ec7c..d597dfdbcf 100644 --- a/ledger/acctupdates_test.go +++ b/ledger/acctupdates_test.go @@ -441,7 +441,7 @@ func TestAcctUpdates(t *testing.T) { checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto) // lastCreatableID stores asset or app max used index to get rid of conflicts - lastCreatableID := crypto.RandUint64() % 512 + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) knownCreatables := make(map[basics.CreatableIndex]bool) start := basics.Round(10) @@ -452,7 +452,8 @@ func TestAcctUpdates(t *testing.T) { var updates ledgercore.AccountDeltas var totals map[basics.Address]ledgercore.AccountData base := accts[i-1] - updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID) + updates, totals = ledgertesting.RandomDeltasBalancedFull( + 1, base, rewardLevel, &lastCreatableID) prevRound, prevTotals, err := au.LatestTotals() require.Equal(t, i-1, prevRound) require.NoError(t, err) @@ -2221,7 +2222,7 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, checkAcctUpdates(t, au, 0, 9, accts, rewardsLevels, proto) // lastCreatableID stores asset or app max used index to get rid of conflicts - lastCreatableID := crypto.RandUint64() % 512 + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) knownCreatables := make(map[basics.CreatableIndex]bool) for i := basics.Round(10); i < basics.Round(proto.MaxBalLookback+15); i++ { @@ -2230,7 +2231,8 @@ func testAcctUpdatesLookupRetry(t *testing.T, assertFn func(au *accountUpdates, var updates ledgercore.AccountDeltas var totals map[basics.Address]ledgercore.AccountData base := accts[i-1] - updates, totals, lastCreatableID = 
ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID) + updates, totals = ledgertesting.RandomDeltasBalancedFull( + 1, base, rewardLevel, &lastCreatableID) prevRound, prevTotals, err := au.LatestTotals() require.Equal(t, i-1, prevRound) require.NoError(t, err) diff --git a/ledger/catchpointtracker_test.go b/ledger/catchpointtracker_test.go index eb812937f7..025c7ebfe0 100644 --- a/ledger/catchpointtracker_test.go +++ b/ledger/catchpointtracker_test.go @@ -433,7 +433,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) { const testCatchpointLabelsCount = 5 // lastCreatableID stores asset or app max used index to get rid of conflicts - lastCreatableID := crypto.RandUint64() % 512 + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) knownCreatables := make(map[basics.CreatableIndex]bool) catchpointLabels := make(map[basics.Round]string) ledgerHistory := make(map[basics.Round]*mockLedgerForTracker) @@ -444,7 +444,7 @@ func TestReproducibleCatchpointLabels(t *testing.T) { var updates ledgercore.AccountDeltas var totals map[basics.Address]ledgercore.AccountData base := accts[i-1] - updates, totals, lastCreatableID = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, lastCreatableID) + updates, totals = ledgertesting.RandomDeltasBalancedFull(1, base, rewardLevel, &lastCreatableID) prevRound, prevTotals, err := au.LatestTotals() require.Equal(t, i-1, prevRound) require.NoError(t, err) diff --git a/ledger/internal/apptxn_test.go b/ledger/internal/apptxn_test.go index 4e2b64c1a3..94f72b0e2c 100644 --- a/ledger/internal/apptxn_test.go +++ b/ledger/internal/apptxn_test.go @@ -1890,7 +1890,7 @@ func TestInnerAppVersionCalling(t *testing.T) { genBalances, addrs, _ := ledgertesting.NewTestGenesis() - // 31 allowed inner appls. vFuture enables proto.AllowV4InnerAppls (presumed v33, below) + // 31 allowed inner appls. 
v33 lowered proto.MinInnerApplVersion testConsensusRange(t, 31, 0, func(t *testing.T, ver int) { dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver]) defer dl.Close() @@ -1994,7 +1994,7 @@ itxn_submit`, createAndOptin.ApplicationArgs = [][]byte{six.Program, six.Program} dl.txn(&createAndOptin, "overspend") // passed the checks, but is an overspend } else { - // after 32 proto.AllowV4InnerAppls should be in effect, so calls and optins to v5 are ok + // after 32 proto.MinInnerApplVersion is lowered to 4, so calls and optins to v5 are ok dl.txn(&call, "overspend") // it tried to execute, but test doesn't bother funding dl.txn(&optin, "overspend") // it tried to execute, but test doesn't bother funding optin.ForeignApps[0] = v5withv3csp // but we can't optin to a v5 if it has an old csp @@ -2070,6 +2070,10 @@ func TestAppDowngrade(t *testing.T) { partitiontest.PartitionTest(t) t.Parallel() + two, err := logic.AssembleStringWithVersion("int 1", 2) + require.NoError(t, err) + three, err := logic.AssembleStringWithVersion("int 1", 3) + require.NoError(t, err) four, err := logic.AssembleStringWithVersion("int 1", 4) require.NoError(t, err) five, err := logic.AssembleStringWithVersion("int 1", 5) @@ -2078,6 +2082,40 @@ require.NoError(t, err) genBalances, addrs, _ := ledgertesting.NewTestGenesis() + + // Confirm that in old protocol versions, downgrade is legal + // Start at 28 because we want the v4 app to downgrade to v3 + testConsensusRange(t, 28, 30, func(t *testing.T, ver int) { + dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver]) + defer dl.Close() + + create := txntest.Txn{ + Type: "appl", + Sender: addrs[0], + ApprovalProgram: four.Program, + ClearStateProgram: four.Program, + } + + vb := dl.fullBlock(&create) + app := vb.Block().Payset[0].ApplicationID + + update := txntest.Txn{ + Type: "appl", + ApplicationID: app, + OnCompletion: transactions.UpdateApplicationOC, + Sender: addrs[0], + ApprovalProgram: 
three.Program, + ClearStateProgram: three.Program, + } + + // No change - legal + dl.fullBlock(&update) + + update.ApprovalProgram = two.Program + // Also legal, and let's check mismatched version while we're at it. + dl.fullBlock(&update) + }) + testConsensusRange(t, 31, 0, func(t *testing.T, ver int) { dl := NewDoubleLedger(t, genBalances, consensusByNumber[ver]) defer dl.Close() @@ -2112,7 +2150,7 @@ func TestAppDowngrade(t *testing.T) { update.ClearStateProgram = five.Program dl.fullBlock(&update) - // Downgrade (allowed for pre 6 programs until AllowV4InnerAppls) + // Downgrade (allowed for pre 6 programs until MinInnerApplVersion was lowered) update.ClearStateProgram = four.Program if ver <= 32 { dl.fullBlock(update.Noted("actually a repeat of first upgrade")) diff --git a/ledger/testing/randomAccounts.go b/ledger/testing/randomAccounts.go index 6cbc7cbcea..8600315193 100644 --- a/ledger/testing/randomAccounts.go +++ b/ledger/testing/randomAccounts.go @@ -187,7 +187,7 @@ func RandomAppLocalState() basics.AppLocalState { } // RandomFullAccountData generates a random AccountData -func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.CreatableIndex]basics.CreatableType, lastCreatableID uint64) (basics.AccountData, map[basics.CreatableIndex]basics.CreatableType, uint64) { +func RandomFullAccountData(rewardsLevel uint64, lastCreatableID *basics.CreatableIndex, assets map[basics.AssetIndex]struct{}, apps map[basics.AppIndex]struct{}) basics.AccountData { data := RandomAccountData(rewardsLevel) crypto.RandBytes(data.VoteID[:]) @@ -202,28 +202,26 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat data.AssetParams = make(map[basics.AssetIndex]basics.AssetParams, createdAssetsCount) for i := uint64(0); i < createdAssetsCount; i++ { ap := RandomAssetParams() - lastCreatableID++ - data.AssetParams[basics.AssetIndex(lastCreatableID)] = ap - knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AssetCreatable 
+ *lastCreatableID++ + data.AssetParams[basics.AssetIndex(*lastCreatableID)] = ap + assets[basics.AssetIndex(*lastCreatableID)] = struct{}{} } } - if (crypto.RandUint64()%2) == 1 && lastCreatableID > 0 { + if (crypto.RandUint64()%2 == 1) && (len(assets) > 0) { // if account owns assets ownedAssetsCount := crypto.RandUint64()%20 + 1 data.Assets = make(map[basics.AssetIndex]basics.AssetHolding, ownedAssetsCount) for i := uint64(0); i < ownedAssetsCount; i++ { ah := RandomAssetHolding(false) - aidx := crypto.RandUint64() % lastCreatableID + var aidx basics.AssetIndex for { - ctype, ok := knownCreatables[basics.CreatableIndex(aidx)] - if !ok || ctype == basics.AssetCreatable { + aidx = basics.AssetIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1) + if _, ok := assets[aidx]; ok { break } - aidx = crypto.RandUint64() % lastCreatableID } - data.Assets[basics.AssetIndex(aidx)] = ah - knownCreatables[basics.CreatableIndex(aidx)] = basics.AssetCreatable + data.Assets[aidx] = ah } } if (crypto.RandUint64() % 5) == 1 { @@ -235,26 +233,24 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat data.AppParams = make(map[basics.AppIndex]basics.AppParams, appParamsCount) for i := uint64(0); i < appParamsCount; i++ { ap := RandomAppParams() - lastCreatableID++ - data.AppParams[basics.AppIndex(lastCreatableID)] = ap - knownCreatables[basics.CreatableIndex(lastCreatableID)] = basics.AppCreatable + *lastCreatableID++ + data.AppParams[basics.AppIndex(*lastCreatableID)] = ap + apps[basics.AppIndex(*lastCreatableID)] = struct{}{} } } - if (crypto.RandUint64()%3) == 1 && lastCreatableID > 0 { + if (crypto.RandUint64()%3 == 1) && (len(apps) > 0) { appStatesCount := crypto.RandUint64()%20 + 1 data.AppLocalStates = make(map[basics.AppIndex]basics.AppLocalState, appStatesCount) for i := uint64(0); i < appStatesCount; i++ { ap := RandomAppLocalState() - aidx := crypto.RandUint64() % lastCreatableID + var aidx basics.AppIndex for { - ctype, ok := 
knownCreatables[basics.CreatableIndex(aidx)] - if !ok || ctype == basics.AppCreatable { + aidx = basics.AppIndex(crypto.RandUint64()%uint64(*lastCreatableID) + 1) + if _, ok := apps[aidx]; ok { break } - aidx = crypto.RandUint64() % lastCreatableID } data.AppLocalStates[basics.AppIndex(aidx)] = ap - knownCreatables[basics.CreatableIndex(aidx)] = basics.AppCreatable } } @@ -264,7 +260,8 @@ func RandomFullAccountData(rewardsLevel uint64, knownCreatables map[basics.Creat NumByteSlice: crypto.RandUint64() % 50, } } - return data, knownCreatables, lastCreatableID + + return data } // RandomAccounts generates a random set of accounts map @@ -275,10 +272,11 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac res[RandomAddress()] = RandomAccountData(0) } } else { - lastCreatableID := crypto.RandUint64() % 512 - knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType) + lastCreatableID := basics.CreatableIndex(crypto.RandUint64() % 512) + assets := make(map[basics.AssetIndex]struct{}) + apps := make(map[basics.AppIndex]struct{}) for i := 0; i < niter; i++ { - res[RandomAddress()], knownCreatables, lastCreatableID = RandomFullAccountData(0, knownCreatables, lastCreatableID) + res[RandomAddress()] = RandomFullAccountData(0, &lastCreatableID, assets, apps) } } return res @@ -286,18 +284,20 @@ func RandomAccounts(niter int, simpleAccounts bool) map[basics.Address]basics.Ac // RandomDeltas generates a random set of accounts delta func RandomDeltas(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) { - updates, totals, imbalance, _ = RandomDeltasImpl(niter, base, rewardsLevel, true, 0) + var lastCreatableID basics.CreatableIndex + updates, totals, imbalance = + RandomDeltasImpl(niter, base, rewardsLevel, true, &lastCreatableID) return } // RandomDeltasFull generates a random set of accounts delta -func 
RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) { - updates, totals, imbalance, lastCreatableID = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableIDIn) +func RandomDeltasFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) { + updates, totals, imbalance = RandomDeltasImpl(niter, base, rewardsLevel, false, lastCreatableID) return } // RandomDeltasImpl generates a random set of accounts delta -func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64, lastCreatableID uint64) { +func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, imbalance int64) { proto := config.Consensus[protocol.ConsensusCurrentVersion] totals = make(map[basics.Address]ledgercore.AccountData) @@ -309,30 +309,21 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew } // if making a full delta then need to determine max asset/app id to get rid of conflicts - lastCreatableID = lastCreatableIDIn - knownCreatables := make(map[basics.CreatableIndex]basics.CreatableType) + assets := make(map[basics.AssetIndex]struct{}) + apps := make(map[basics.AppIndex]struct{}) if !simple { for _, ad := range base { for aid := range ad.AssetParams { - if uint64(aid) > lastCreatableID { - lastCreatableID = uint64(aid) - } - 
knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable + assets[aid] = struct{}{} } for aid := range ad.Assets { - // do not check lastCreatableID since lastCreatableID is only incremented for new params - knownCreatables[basics.CreatableIndex(aid)] = basics.AssetCreatable + assets[aid] = struct{}{} } - for aid := range ad.AppParams { - if uint64(aid) > lastCreatableID { - lastCreatableID = uint64(aid) - } - knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable + apps[aid] = struct{}{} } for aid := range ad.AppLocalStates { - // do not check lastCreatableID since lastCreatableID is only incremented for new params - knownCreatables[basics.CreatableIndex(aid)] = basics.AppCreatable + apps[aid] = struct{}{} } } } @@ -357,7 +348,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew new = ledgercore.ToAccountData(data) updates.Upsert(addr, new) } else { - data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID) + data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps) new = ledgercore.ToAccountData(data) updates.Upsert(addr, new) appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord) @@ -442,7 +433,7 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew new = ledgercore.ToAccountData(data) updates.Upsert(addr, new) } else { - data, knownCreatables, lastCreatableID = RandomFullAccountData(rewardsLevel, knownCreatables, lastCreatableID) + data = RandomFullAccountData(rewardsLevel, lastCreatableID, assets, apps) new = ledgercore.ToAccountData(data) updates.Upsert(addr, new) appResources := make(map[basics.AppIndex]ledgercore.AppResourceRecord) @@ -489,23 +480,26 @@ func RandomDeltasImpl(niter int, base map[basics.Address]basics.AccountData, rew // RandomDeltasBalanced generates a random set of accounts delta func RandomDeltasBalanced(niter int, base map[basics.Address]basics.AccountData, rewardsLevel 
uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) { - updates, totals, _ = RandomDeltasBalancedImpl(niter, base, rewardsLevel, true, 0) + var lastCreatableID basics.CreatableIndex + updates, totals = RandomDeltasBalancedImpl( + niter, base, rewardsLevel, true, &lastCreatableID) return } // RandomDeltasBalancedFull generates a random set of accounts delta -func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) { - updates, totals, lastCreatableID = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableIDIn) +func RandomDeltasBalancedFull(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) { + updates, totals = RandomDeltasBalancedImpl(niter, base, rewardsLevel, false, lastCreatableID) return } // RandomDeltasBalancedImpl generates a random set of accounts delta -func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableIDIn uint64) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData, lastCreatableID uint64) { +func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountData, rewardsLevel uint64, simple bool, lastCreatableID *basics.CreatableIndex) (updates ledgercore.AccountDeltas, totals map[basics.Address]ledgercore.AccountData) { var imbalance int64 if simple { updates, totals, imbalance = RandomDeltas(niter, base, rewardsLevel) } else { - updates, totals, imbalance, lastCreatableID = RandomDeltasFull(niter, base, rewardsLevel, lastCreatableIDIn) + updates, totals, imbalance = + RandomDeltasFull(niter, base, rewardsLevel, lastCreatableID) } oldPool := 
base[testPoolAddr] @@ -516,5 +510,5 @@ func RandomDeltasBalancedImpl(niter int, base map[basics.Address]basics.AccountD updates.Upsert(testPoolAddr, newPool) totals[testPoolAddr] = newPool - return updates, totals, lastCreatableID + return updates, totals } diff --git a/libgoal/libgoal.go b/libgoal/libgoal.go index f766363f2d..ea99545285 100644 --- a/libgoal/libgoal.go +++ b/libgoal/libgoal.go @@ -18,10 +18,12 @@ package libgoal import ( "encoding/json" + "errors" "fmt" "io/ioutil" "os" "path/filepath" + "time" algodclient "github.com/algorand/go-algorand/daemon/algod/api/client" v2 "github.com/algorand/go-algorand/daemon/algod/api/server/v2" @@ -938,6 +940,30 @@ func (c *Client) GetPendingTransactionsByAddress(addr string, maxTxns uint64) (r return } +// VerifyParticipationKey checks if a given participationID is installed in a loop until timeout has elapsed. +func (c *Client) VerifyParticipationKey(timeout time.Duration, participationID string) error { + start := time.Now() + + for { + keysResp, err := c.GetParticipationKeys() + if err != nil { + return err + } + for _, key := range keysResp { + if key.Id == participationID { + // Installation successful. + return nil + } + } + + if time.Since(start) > timeout { + return errors.New("timeout waiting for key to appear") + } + + time.Sleep(1 * time.Second) + } +} + // AddParticipationKey takes a participation key file and sends it to the node. // The key will be loaded into the system when the function returns successfully. 
func (c *Client) AddParticipationKey(keyfile string) (resp generated.PostParticipationResponse, err error) { diff --git a/libgoal/participation.go b/libgoal/participation.go index 2dbbbde98c..88a1151a7e 100644 --- a/libgoal/participation.go +++ b/libgoal/participation.go @@ -18,7 +18,6 @@ package libgoal import ( "fmt" - "io/ioutil" "math" "os" "path/filepath" @@ -27,76 +26,34 @@ import ( "github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" - "github.com/algorand/go-algorand/protocol" "github.com/algorand/go-algorand/util/db" ) // chooseParticipation chooses which participation keys to use for going online // based on the address, round number, and available participation databases -func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part account.Participation, err error) { - genID, err := c.GenesisID() +func (c *Client) chooseParticipation(address basics.Address, round basics.Round) (part generated.ParticipationKey, err error) { + parts, err := c.ListParticipationKeys() if err != nil { return } - // Get a list of files in the participation keys directory - keyDir := filepath.Join(c.DataDir(), genID) - files, err := ioutil.ReadDir(keyDir) - if err != nil { - return - } - // This lambda will be used for finding the desired file. 
- checkIfFileIsDesiredKey := func(file os.FileInfo, expiresAfter basics.Round) (part account.Participation, err error) { - var handle db.Accessor - var partCandidate account.PersistedParticipation - - // If it can't be a participation key database, skip it - if !config.IsPartKeyFilename(file.Name()) { - return - } - - filename := file.Name() - - // Fetch a handle to this database - handle, err = db.MakeErasableAccessor(filepath.Join(keyDir, filename)) - if err != nil { - // Couldn't open it, skip it - return - } - - // Fetch an account.Participation from the database - partCandidate, err = account.RestoreParticipation(handle) - if err != nil { - // Couldn't read it, skip it - handle.Close() - return - } - defer partCandidate.Close() - - // Return the Participation valid for this round that relates to the passed address + // Loop through each of the participation keys; pick the one that expires farthest in the future. + var expiry uint64 = 0 + for _, info := range parts { + // Choose the Participation valid for this round that relates to the passed address // that expires farthest in the future. // Note that algod will sign votes with all possible Participations. so any should work // in the short-term. // In the future we should allow the user to specify exactly which partkeys to register. - if partCandidate.FirstValid <= round && round <= partCandidate.LastValid && partCandidate.Parent == address && partCandidate.LastValid > expiresAfter { - part = partCandidate.Participation + if info.Key.VoteFirstValid <= uint64(round) && uint64(round) <= info.Key.VoteLastValid && info.Address == address.String() && info.Key.VoteLastValid > expiry { + part = info + expiry = part.Key.VoteLastValid } - return - } - // Loop through each of the files; pick the one that expires farthest in the future. 
- var expiry basics.Round - for _, info := range files { - // Use above lambda so the deferred handle closure happens each loop - partCandidate, err := checkIfFileIsDesiredKey(info, expiry) - if err == nil && (!partCandidate.Parent.IsZero()) { - part = partCandidate - expiry = part.LastValid - } } - if part.Parent.IsZero() { + if part.Address == "" { // Couldn't find one - err = fmt.Errorf("Couldn't find a participation key database for address %v valid at round %v in directory %v", address.GetUserAddress(), round, keyDir) + err = fmt.Errorf("couldn't find a participation key database for address %v valid at round %v in participation registry", address.GetUserAddress(), round) return } return @@ -117,8 +74,12 @@ func (c *Client) GenParticipationKeys(address string, firstValid, lastValid, key } // GenParticipationKeysTo creates a .partkey database for a given address, fills -// it with keys, and saves it in the specified output directory. +// it with keys, and saves it in the specified output directory. If the output +// directory is empty, the key will be installed. func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, keyDilution uint64, outDir string) (part account.Participation, filePath string, err error) { + + install := outDir == "" + // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { @@ -127,16 +88,9 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k firstRound, lastRound := basics.Round(firstValid), basics.Round(lastValid) - // If output directory wasn't specified, store it in the current ledger directory. 
- if outDir == "" { - // Get the GenesisID for use in the participation key path - var genID string - genID, err = c.GenesisID() - if err != nil { - return - } - - outDir = filepath.Join(c.DataDir(), genID) + // If we are installing, generate in the temp dir + if install { + outDir = os.TempDir() } // Connect to the database partKeyPath, err := participationKeysPath(outDir, parsedAddr, firstRound, lastRound) @@ -152,6 +106,14 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k return } + // If the key is being installed, remove it afterwards. + if install { + // Explicitly ignore any errors + defer func(name string) { + _ = os.Remove(name) + }(partKeyPath) + } + partdb, err := db.MakeErasableAccessor(partKeyPath) if err != nil { return @@ -165,79 +127,15 @@ func (c *Client) GenParticipationKeysTo(address string, firstValid, lastValid, k newPart, err := account.FillDBWithParticipationKeys(partdb, parsedAddr, firstRound, lastRound, keyDilution) part = newPart.Participation partdb.Close() - return part, partKeyPath, err -} - -// InstallParticipationKeys creates a .partkey database for a given address, -// based on an existing database from inputfile. On successful install, it -// deletes the input file. 
-func (c *Client) InstallParticipationKeys(inputfile string) (part account.Participation, filePath string, err error) { - proto, ok := c.consensus[protocol.ConsensusCurrentVersion] - if !ok { - err = fmt.Errorf("Unknown consensus protocol %s", protocol.ConsensusCurrentVersion) - return - } - - // Get the GenesisID for use in the participation key path - var genID string - genID, err = c.GenesisID() - if err != nil { - return - } - - outDir := filepath.Join(c.DataDir(), genID) - - inputdb, err := db.MakeErasableAccessor(inputfile) - if err != nil { - return - } - defer inputdb.Close() - - partkey, err := account.RestoreParticipationWithSecrets(inputdb) - if err != nil { - return - } - - if partkey.Parent == (basics.Address{}) { - err = fmt.Errorf("Cannot install partkey with missing (zero) parent address") - return - } - - newdbpath, err := participationKeysPath(outDir, partkey.Parent, partkey.FirstValid, partkey.LastValid) - if err != nil { - return - } - newdb, err := db.MakeErasableAccessor(newdbpath) if err != nil { return } - newpartkey := partkey - newpartkey.Store = newdb - err = newpartkey.PersistWithSecrets() - if err != nil { - newpartkey.Close() - return + if install { + _, err = c.AddParticipationKey(partKeyPath) } - - // After successful install, remove the input copy of the - // partkey so that old keys cannot be recovered after they - // are used by algod. We try to delete the data inside - // sqlite first, so the key material is zeroed out from - // disk blocks, but regardless of whether that works, we - // delete the input file. The consensus protocol version - // is irrelevant for the maxuint64 round number we pass in. 
- errCh := partkey.DeleteOldKeys(basics.Round(math.MaxUint64), proto) - err = <-errCh - if err != nil { - newpartkey.Close() - return - } - os.Remove(inputfile) - part = newpartkey.Participation - newpartkey.Close() - return part, newdbpath, nil + return part, partKeyPath, err } // ListParticipationKeys returns the available participation keys, @@ -249,49 +147,3 @@ func (c *Client) ListParticipationKeys() (partKeyFiles generated.ParticipationKe } return } - -// ListParticipationKeyFiles returns the available participation keys, -// as a map from database filename to Participation key object. -func (c *Client) ListParticipationKeyFiles() (partKeyFiles map[string]account.Participation, err error) { - genID, err := c.GenesisID() - if err != nil { - return - } - - // Get a list of files in the participation keys directory - keyDir := filepath.Join(c.DataDir(), genID) - files, err := ioutil.ReadDir(keyDir) - if err != nil { - return - } - - partKeyFiles = make(map[string]account.Participation) - for _, file := range files { - // If it can't be a participation key database, skip it - if !config.IsPartKeyFilename(file.Name()) { - continue - } - - filename := file.Name() - - // Fetch a handle to this database - handle, err := db.MakeErasableAccessor(filepath.Join(keyDir, filename)) - if err != nil { - // Couldn't open it, skip it - continue - } - - // Fetch an account.Participation from the database - part, err := account.RestoreParticipation(handle) - if err != nil { - // Couldn't read it, skip it - handle.Close() - continue - } - - partKeyFiles[filename] = part.Participation - part.Close() - } - - return -} diff --git a/libgoal/transactions.go b/libgoal/transactions.go index bf704cc9ef..a03a9d5517 100644 --- a/libgoal/transactions.go +++ b/libgoal/transactions.go @@ -20,7 +20,10 @@ import ( "errors" "fmt" + "github.com/algorand/go-algorand/config" "github.com/algorand/go-algorand/crypto" + "github.com/algorand/go-algorand/crypto/merklesignature" + 
"github.com/algorand/go-algorand/daemon/algod/api/server/v2/generated" "github.com/algorand/go-algorand/daemon/algod/api/spec/v1" "github.com/algorand/go-algorand/data/account" "github.com/algorand/go-algorand/data/basics" @@ -191,8 +194,98 @@ func (c *Client) SignAndBroadcastTransaction(walletHandle, pw []byte, utx transa return c.BroadcastTransaction(stx) } +// generateRegistrationTransaction returns a transaction object for registering a Participation with its parent this is +// similar to account.Participation.GenerateRegistrationTransaction. +func generateRegistrationTransaction(part generated.ParticipationKey, fee basics.MicroAlgos, txnFirstValid, txnLastValid basics.Round, leaseBytes [32]byte) (transactions.Transaction, error) { + addr, err := basics.UnmarshalChecksumAddress(part.Address) + if err != nil { + return transactions.Transaction{}, err + } + + if len(part.Key.VoteParticipationKey) != 32 { + return transactions.Transaction{}, fmt.Errorf("voting key is the wrong size, should be 32 but it is %d", len(part.Key.VoteParticipationKey)) + } + + var votePk [32]byte + copy(votePk[:], part.Key.VoteParticipationKey[:]) + + if len(part.Key.SelectionParticipationKey) != 32 { + return transactions.Transaction{}, fmt.Errorf("selection key is the wrong size, should be 32 but it is %d", len(part.Key.VoteParticipationKey)) + } + + var selectionPk [32]byte + copy(selectionPk[:], part.Key.SelectionParticipationKey[:]) + + if part.Key.StateProofKey == nil { + return transactions.Transaction{}, fmt.Errorf("state proof key pointer is nil") + } + + if len(*part.Key.StateProofKey) != len(merklesignature.Verifier{}) { + return transactions.Transaction{}, fmt.Errorf("state proof key is the wrong size, should be %d but it is %d", len(merklesignature.Verifier{}), len(*part.Key.StateProofKey)) + } + + var stateProofPk merklesignature.Verifier + copy(stateProofPk[:], (*part.Key.StateProofKey)[:]) + + t := transactions.Transaction{ + Type: protocol.KeyRegistrationTx, 
+ Header: transactions.Header{ + Sender: addr, + Fee: fee, + FirstValid: txnFirstValid, + LastValid: txnLastValid, + Lease: leaseBytes, + }, + KeyregTxnFields: transactions.KeyregTxnFields{ + VotePK: votePk, + SelectionPK: selectionPk, + StateProofPK: stateProofPk, + }, + } + t.KeyregTxnFields.VoteFirst = basics.Round(part.Key.VoteFirstValid) + t.KeyregTxnFields.VoteLast = basics.Round(part.Key.VoteLastValid) + t.KeyregTxnFields.VoteKeyDilution = part.Key.VoteKeyDilution + + return t, nil +} + +// MakeRegistrationTransactionWithGenesisID Generates a Registration transaction with the genesis ID set from the suggested parameters of the client +func (c *Client) MakeRegistrationTransactionWithGenesisID(part account.Participation, fee, txnFirstValid, txnLastValid uint64, leaseBytes [32]byte, includeStateProofKeys bool) (transactions.Transaction, error) { + + // Get current round, protocol, genesis ID + params, err := c.SuggestedParams() + if err != nil { + return transactions.Transaction{}, err + } + + cparams, ok := c.consensus[protocol.ConsensusVersion(params.ConsensusVersion)] + if !ok { + return transactions.Transaction{}, errors.New("unknown consensus version") + } + + txnFirstValid, txnLastValid, err = computeValidityRounds(txnFirstValid, txnLastValid, 0, params.LastRound, cparams.MaxTxnLife) + if err != nil { + return transactions.Transaction{}, err + } + + goOnlineTx := part.GenerateRegistrationTransaction( + basics.MicroAlgos{Raw: fee}, + basics.Round(txnFirstValid), + basics.Round(txnLastValid), + leaseBytes, includeStateProofKeys) + + goOnlineTx.Header.GenesisID = params.GenesisID + + // Check if the protocol supports genesis hash + if config.Consensus[protocol.ConsensusFuture].SupportGenesisHash { + copy(goOnlineTx.Header.GenesisHash[:], params.GenesisHash) + } + + return goOnlineTx, nil +} + // MakeUnsignedGoOnlineTx creates a transaction that will bring an address online using available participation keys -func (c *Client) MakeUnsignedGoOnlineTx(address 
string, part *account.Participation, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { +func (c *Client) MakeUnsignedGoOnlineTx(address string, firstValid, lastValid, fee uint64, leaseBytes [32]byte) (transactions.Transaction, error) { // Parse the address parsedAddr, err := basics.UnmarshalChecksumAddress(address) if err != nil { @@ -217,19 +310,19 @@ func (c *Client) MakeUnsignedGoOnlineTx(address string, part *account.Participat // Choose which participation keys to go online with; // need to do this after filling in the round number. - if part == nil { - bestPart, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid)) - if err != nil { - return transactions.Transaction{}, err - } - part = &bestPart + part, err := c.chooseParticipation(parsedAddr, basics.Round(firstValid)) + if err != nil { + return transactions.Transaction{}, err } parsedFrstValid := basics.Round(firstValid) parsedLastValid := basics.Round(lastValid) parsedFee := basics.MicroAlgos{Raw: fee} - goOnlineTransaction := part.GenerateRegistrationTransaction(parsedFee, parsedFrstValid, parsedLastValid, leaseBytes, cparams.EnableStateProofKeyregCheck) + goOnlineTransaction, err := generateRegistrationTransaction(part, parsedFee, parsedFrstValid, parsedLastValid, leaseBytes) + if err != nil { + return transactions.Transaction{}, err + } if cparams.SupportGenesisHash { var genHash crypto.Digest copy(genHash[:], params.GenesisHash) diff --git a/node/node.go b/node/node.go index 156f8fe336..6e5b6371ae 100644 --- a/node/node.go +++ b/node/node.go @@ -403,12 +403,7 @@ func (node *AlgorandFullNode) Start() { // startMonitoringRoutines starts the internal monitoring routines used by the node. 
func (node *AlgorandFullNode) startMonitoringRoutines() { - node.monitoringRoutinesWaitGroup.Add(3) - - // PKI TODO: Remove this with #2596 - // Periodically check for new participation keys - go node.checkForParticipationKeys(node.ctx.Done()) - + node.monitoringRoutinesWaitGroup.Add(2) go node.txPoolGaugeThread(node.ctx.Done()) // Delete old participation keys go node.oldKeyDeletionThread(node.ctx.Done()) @@ -781,24 +776,6 @@ func ensureParticipationDB(genesisDir string, log logging.Logger) (account.Parti return account.MakeParticipationRegistry(accessor, log) } -// Reload participation keys from disk periodically -func (node *AlgorandFullNode) checkForParticipationKeys(done <-chan struct{}) { - defer node.monitoringRoutinesWaitGroup.Done() - ticker := time.NewTicker(node.config.ParticipationKeysRefreshInterval) - for { - select { - case <-ticker.C: - err := node.loadParticipationKeys() - if err != nil { - node.log.Errorf("Could not refresh participation keys: %v", err) - } - case <-done: - ticker.Stop() - return - } - } -} - // ListParticipationKeys returns all participation keys currently installed on the node func (node *AlgorandFullNode) ListParticipationKeys() (partKeys []account.ParticipationRecord, err error) { return node.accountManager.Registry().GetAll(), nil @@ -916,7 +893,7 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc } defer inputdb.Close() - partkey, err := account.RestoreParticipation(inputdb) + partkey, err := account.RestoreParticipationWithSecrets(inputdb) if err != nil { return account.ParticipationID{}, err } @@ -927,20 +904,19 @@ func (node *AlgorandFullNode) InstallParticipationKey(partKeyBinary []byte) (acc } // Tell the AccountManager about the Participation (dupes don't matter) so we ignore the return value - _ = node.accountManager.AddParticipation(partkey) + added := node.accountManager.AddParticipation(partkey) + if !added { + return account.ParticipationID{}, fmt.Errorf("ParticipationRegistry: 
cannot register duplicate participation key") + } - err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration) + err = insertStateProofToRegistry(partkey, node) if err != nil { return account.ParticipationID{}, err } - newFilename := config.PartKeyFilename(partkey.ID().String(), uint64(partkey.FirstValid), uint64(partkey.LastValid)) - newFullyQualifiedFilename := filepath.Join(outDir, filepath.Base(newFilename)) - - err = os.Rename(fullyQualifiedTempFile, newFullyQualifiedFilename) - + err = node.accountManager.Registry().Flush(participationRegistryFlushMaxWaitDuration) if err != nil { - return account.ParticipationID{}, nil + return account.ParticipationID{}, err } return partkey.ID(), nil diff --git a/shared/pingpong/accounts.go b/shared/pingpong/accounts.go index 7cf8de4054..fa8cfecdf0 100644 --- a/shared/pingpong/accounts.go +++ b/shared/pingpong/accounts.go @@ -19,7 +19,6 @@ package pingpong import ( "fmt" "io/ioutil" - "math" "math/rand" "os" "path/filepath" @@ -36,7 +35,6 @@ import ( "github.com/algorand/go-algorand/data/transactions/logic" "github.com/algorand/go-algorand/libgoal" "github.com/algorand/go-algorand/protocol" - "github.com/algorand/go-algorand/util" "github.com/algorand/go-algorand/util/db" ) @@ -132,17 +130,6 @@ func (pps *WorkerState) ensureAccounts(ac libgoal.Client, initCfg PpConfig) (acc return } -// throttle transaction rate -func throttleTransactionRate(startTime time.Time, cfg PpConfig, totalSent uint64) { - localTimeDelta := time.Since(startTime) - currentTps := float64(totalSent) / localTimeDelta.Seconds() - if currentTps > float64(cfg.TxnPerSec) { - sleepSec := float64(totalSent)/float64(cfg.TxnPerSec) - localTimeDelta.Seconds() - sleepTime := time.Duration(int64(math.Round(sleepSec*1000))) * time.Millisecond - util.NanoSleep(sleepTime) - } -} - // Prepare assets for asset transaction testing // Step 1) Create X assets for each of the participant accounts // Step 2) For each participant 
account, opt-in to assets of all other participant accounts @@ -153,13 +140,14 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie return } - var startTime = time.Now() - var totalSent uint64 = 0 resultAssetMaps = make(map[uint64]v1.AssetParams) // optIns contains own and explicitly opted-in assets optIns = make(map[uint64][]string) numCreatedAssetsByAddr := make(map[string]int, len(accounts)) + + nextSendTime := time.Now() + // 1) Create X assets for each of the participant accounts for addr := range accounts { if addr == pps.cfg.SrcAccount { @@ -179,6 +167,7 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie fmt.Printf("cfg.NumAsset %v, addrAccount.AssetParams %v\n", pps.cfg.NumAsset, addrAccount.AssetParams) totalSupply := pps.cfg.MinAccountAsset * uint64(pps.cfg.NumPartAccounts) * 9 * uint64(pps.cfg.GroupSize) * uint64(pps.cfg.RefreshTime.Seconds()) / pps.cfg.TxnPerSec + // create assets in participant account for i := 0; i < toCreate; i++ { var metaLen = 32 @@ -205,14 +194,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie return } tx.Note = pps.makeNextUniqueNoteField() + schedule(pps.cfg.TxnPerSec, &nextSendTime) _, err = signAndBroadcastTransaction(accounts[addr], tx, client) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset creation failed with error %v\n", err) return } - - totalSent++ - throttleTransactionRate(startTime, pps.cfg, totalSent) } } @@ -255,10 +242,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie // optInsByAddr tracks only explicitly opted-in assetsA optInsByAddr := make(map[string]map[uint64]bool) - // reset rate-control - startTime = time.Now() - totalSent = 0 - // 2) For each participant account, opt-in up to proto.MaxAssetsPerAccount assets of all other participant accounts for addr := range accounts { if addr == pps.cfg.SrcAccount { @@ -308,17 +291,14 @@ func (pps 
*WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie } tx.Note = pps.makeNextUniqueNoteField() + schedule(pps.cfg.TxnPerSec, &nextSendTime) _, err = signAndBroadcastTransaction(accounts[addr], tx, client) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset optin failed with error %v\n", err) return } - totalSent++ - optIns[k] = append(optIns[k], addr) optInsByAddr[addr][k] = true - - throttleTransactionRate(startTime, pps.cfg, totalSent) } } @@ -354,10 +334,6 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie } } - // reset rate-control - startTime = time.Now() - totalSent = 0 - // Step 3) Evenly distribute the assets across all opted-in accounts for k, creator := range allAssets { if !pps.cfg.Quiet { @@ -403,14 +379,12 @@ func (pps *WorkerState) prepareAssets(accounts map[string]*pingPongAccount, clie } } + schedule(pps.cfg.TxnPerSec, &nextSendTime) _, err = signAndBroadcastTransaction(accounts[creator], tx, client) if err != nil { _, _ = fmt.Fprintf(os.Stderr, "signing and broadcasting asset distribution failed with error %v\n", err) return } - - totalSent++ - throttleTransactionRate(startTime, pps.cfg, totalSent) } // append the asset to the result assets resultAssetMaps[k] = assetParams[k] diff --git a/shared/pingpong/config.go b/shared/pingpong/config.go index 73ad4e4ec7..db6cbb4ed1 100644 --- a/shared/pingpong/config.go +++ b/shared/pingpong/config.go @@ -31,7 +31,6 @@ const ConfigFilename = "ppconfig.json" // PpConfig defines configuration structure for type PpConfig struct { SrcAccount string - DelayBetweenTxn time.Duration RandomizeFee bool RandomizeAmt bool RandomizeDst bool @@ -41,7 +40,6 @@ type PpConfig struct { TxnPerSec uint64 NumPartAccounts uint32 RunTime time.Duration - RestTime time.Duration RefreshTime time.Duration MinAccountFunds uint64 Quiet bool @@ -71,7 +69,6 @@ type PpConfig struct { // DefaultConfig object for Ping Pong var DefaultConfig = PpConfig{ SrcAccount: 
"", - DelayBetweenTxn: 100, RandomizeFee: false, RandomizeAmt: false, RandomizeDst: false, @@ -81,7 +78,6 @@ var DefaultConfig = PpConfig{ TxnPerSec: 200, NumPartAccounts: 10, RunTime: 10 * time.Second, - RestTime: 1 * time.Hour, // Long default rest to avoid accidental DoS RefreshTime: 10 * time.Second, MinAccountFunds: 100000, GroupSize: 1, diff --git a/shared/pingpong/pingpong.go b/shared/pingpong/pingpong.go index 71d787f792..5fcde03737 100644 --- a/shared/pingpong/pingpong.go +++ b/shared/pingpong/pingpong.go @@ -262,6 +262,16 @@ func computeAccountMinBalance(client libgoal.Client, cfg PpConfig) (fundingRequi return } +// Wait for `*nextSendTime` and update it afterwards. +func schedule(tps uint64, nextSendTime *time.Time) { + dur := time.Until(*nextSendTime) + if dur > 0 { + time.Sleep(dur) + } + + *nextSendTime = nextSendTime.Add(time.Second / time.Duration(tps)) +} + func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, client libgoal.Client, cfg PpConfig) error { var srcFunds, minFund uint64 var err error @@ -272,7 +282,6 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien return err } - startTime := time.Now() var totalSent uint64 // Fee of 0 will make cause the function to use the suggested one by network @@ -282,12 +291,12 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien if err != nil { return err } - fmt.Printf("adjusting account balance to %d\n", minFund) + + nextSendTime := time.Now() for { accountsAdjusted := 0 for addr, acct := range accounts { - if addr == pps.cfg.SrcAccount { continue } @@ -307,6 +316,7 @@ func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien fmt.Printf("adjusting balance of account %v by %d\n ", addr, toSend) } + schedule(cfg.TxnPerSec, &nextSendTime) tx, err = pps.sendPaymentFromSourceAccount(client, addr, fee, toSend) if err != nil { if strings.Contains(err.Error(), "broadcast queue full") { @@ -323,7 +333,6 @@ 
func (pps *WorkerState) fundAccounts(accounts map[string]*pingPongAccount, clien } totalSent++ - throttleTransactionRate(startTime, cfg, totalSent) } accounts[cfg.SrcAccount].setBalance(srcFunds) // wait until all the above transactions are sent, or that we have no more transactions @@ -462,7 +471,6 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) { if cfg.MaxRuntime > 0 { endTime = time.Now().Add(cfg.MaxRuntime) } - restTime := cfg.RestTime refreshTime := time.Now().Add(cfg.RefreshTime) var nftThrottler *throttler @@ -473,6 +481,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) { lastLog := time.Now() nextLog := lastLog.Add(logPeriod) + nextSendTime := time.Now() for { if ctx.Err() != nil { _, _ = fmt.Fprintf(os.Stderr, "error bad context in RunPingPong: %v\n", ctx.Err()) @@ -520,7 +529,7 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) { } toList := listSufficientAccounts(pps.accounts, minimumAmount, cfg.SrcAccount) - sent, succeeded, err := pps.sendFromTo(fromList, toList, ac) + sent, succeeded, err := pps.sendFromTo(fromList, toList, ac, &nextSendTime) totalSent += sent totalSucceeded += succeeded if err != nil { @@ -535,16 +544,10 @@ func (pps *WorkerState) RunPingPong(ctx context.Context, ac libgoal.Client) { refreshTime = refreshTime.Add(cfg.RefreshTime) } - - throttleTransactionRate(startTime, cfg, totalSent) } timeDelta := time.Since(startTime) _, _ = fmt.Fprintf(os.Stdout, "Sent %d transactions (%d attempted) in %d seconds\n", totalSucceeded, totalSent, int(math.Round(timeDelta.Seconds()))) - if cfg.RestTime > 0 { - _, _ = fmt.Fprintf(os.Stdout, "Pausing %d seconds before sending more transactions\n", int(math.Round(cfg.RestTime.Seconds()))) - time.Sleep(restTime) - } } } @@ -672,7 +675,7 @@ func (pps *WorkerState) makeNftTraffic(client libgoal.Client) (sentCount uint64, func (pps *WorkerState) sendFromTo( fromList, toList []string, - client libgoal.Client, + client 
libgoal.Client, nextSendTime *time.Time, ) (sentCount, successCount uint64, err error) { accounts := pps.accounts cinfo := pps.cinfo @@ -693,8 +696,6 @@ func (pps *WorkerState) sendFromTo( *ap = p assetsByCreator[c] = append(assetsByCreator[c], ap) } - lastTransactionTime := time.Now() - timeCredit := time.Duration(0) for i := 0; i < len(fromList); i = (i + 1) % len(fromList) { from := fromList[i] @@ -770,6 +771,7 @@ func (pps *WorkerState) sendFromTo( return } + schedule(cfg.TxnPerSec, nextSendTime) sentCount++ _, sendErr = client.BroadcastTransaction(stxn) } else { @@ -856,6 +858,7 @@ func (pps *WorkerState) sendFromTo( } } + schedule(cfg.TxnPerSec, nextSendTime) sentCount++ sendErr = client.BroadcastTransactionGroup(stxGroup) } @@ -871,30 +874,6 @@ func (pps *WorkerState) sendFromTo( accounts[from].addBalance(fromBalanceChange) // avoid updating the "to" account. - // the logic here would sleep for the remaining of time to match the desired cfg.DelayBetweenTxn - if cfg.DelayBetweenTxn > 0 { - time.Sleep(cfg.DelayBetweenTxn) - } - if cfg.TxnPerSec > 0 { - timeCredit += time.Second / time.Duration(cfg.TxnPerSec) - - now := time.Now() - took := now.Sub(lastTransactionTime) - timeCredit -= took - if timeCredit > 0 { - time.Sleep(timeCredit) - timeCredit -= time.Since(now) - } else if timeCredit < -1000*time.Millisecond { - // cap the "time debt" to 1000 ms. 
- timeCredit = -1000 * time.Millisecond - } - lastTransactionTime = time.Now() - - // since we just slept enough here, we can take it off the counters - sentCount-- - successCount-- - // fmt.Printf("itration took %v\n", took) - } } return } diff --git a/test/e2e-go/cli/goal/expect/pingpongTest.exp b/test/e2e-go/cli/goal/expect/pingpongTest.exp index 40aec03c85..99fb9a3ee7 100644 --- a/test/e2e-go/cli/goal/expect/pingpongTest.exp +++ b/test/e2e-go/cli/goal/expect/pingpongTest.exp @@ -51,28 +51,28 @@ proc pingpongTest { TEST_ALGO_DIR TEST_DATA_DIR} { set pingpong_duration 5 - set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 5 --minaccount 100000000" - set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 
--appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - - set pingpongArray(10_payment_transaction) "--tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --rest 0 --refresh 10 --numaccounts 50" - set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000" - set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --rest 0 --numaccounts 10 --refresh 10 --mf=1000" - set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --rest 0 --refresh 10 --numaccounts 50 --minaccount 100000000" - set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --rest 0 --refresh 10 --numaccounts 50" + set pingpongArray(1_smallops_smallhash) "--appprogops 2 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 
--tps 200 --refresh 10 --numaccounts 5 --minaccount 100000000" + set pingpongArray(2_smallops_mediumhash) "--appprogops 2 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(3_smallops_bighash) "--appprogops 2 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(4_mediumops_smallhash) "--appprogops 200 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(5_mediumops_mediumhash) "--appprogops 200 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(6_mediumops_bighash) "--appprogops 200 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(7_bigops_smallhash) "--appprogops 500 --appproghashes 2 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(8_bigops_mediumhash) "--appprogops 300 --appproghashes 5 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(9_bigops_bighash) "--appprogops 220 --appproghashes 10 --appproghashsize sha512_256 --numapp 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + + set pingpongArray(10_payment_transaction) "--tps 200 --refresh 10 --numaccounts 50" + set pingpongArray(11_teal_light_transaction) "--teal=light --tps 200 --refresh 10 --numaccounts 50" + set pingpongArray(10_teal_normal_transaction) "--teal=normal --tps 200 --refresh 10 --numaccounts 50" + set pingpongArray(12_teal_heavy_transaction) "--teal=heavy --tps 200 --refresh 10 --numaccounts 50" + set pingpongArray(13_atomic_transfer_small_transaction) "--groupsize=5 --tps 200 
--refresh 10 --numaccounts 50" + set pingpongArray(14_atomic_transfer_large_transaction) "--groupsize=12 --tps 200 --refresh 10 --numaccounts 50" + set pingpongArray(15_asset_transfer_small_transaction) "--tps 200 --numasset=5 --mf 0 --numaccounts 10 --refresh 10 --mf=1000" + set pingpongArray(16_asset_transfer_large_transaction) "--tps 200 --numasset=10 --mf 0 --numaccounts 10 --refresh 10 --mf=1000" + set pingpongArray(17_stateful_teal_small_transaction) "--numapp 10 --appprogops 10 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(18_stateful_teal_medium_transaction) "--numapp 10 --appprogops 200 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(19_stateful_teal_large_transaction) "--numapp 10 --appprogops 600 --tps 200 --refresh 10 --numaccounts 50 --minaccount 100000000" + set pingpongArray(20_rekey_payment_transaction) "--rekey=true --groupsize=2 --randomnote=true --tps 200 --refresh 10 --numaccounts 50" foreach index [array names pingpongArray] { diff --git a/test/e2e-go/features/participation/accountParticipationTransitions_test.go b/test/e2e-go/features/participation/accountParticipationTransitions_test.go index a151ec2e21..9cf58310a4 100644 --- a/test/e2e-go/features/participation/accountParticipationTransitions_test.go +++ b/test/e2e-go/features/participation/accountParticipationTransitions_test.go @@ -60,8 +60,8 @@ func registerParticipationAndWait(t *testing.T, client libgoal.Client, part acco sAccount := part.Address().String() sWH, err := client.GetUnencryptedWalletHandle() require.NoError(t, err) - goOnlineTx, err := client.MakeUnsignedGoOnlineTx(sAccount, &part, txParams.LastRound+1, txParams.LastRound+1, txParams.Fee, [32]byte{}) - require.NoError(t, err) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(part, txParams.Fee, txParams.LastRound+1, txParams.LastRound+1, [32]byte{}, true) + assert.NoError(t, err) require.Equal(t, sAccount, goOnlineTx.Src().String()) 
onlineTxID, err := client.SignAndBroadcastTransaction(sWH, nil, goOnlineTx) require.NoError(t, err) diff --git a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go index c89ca262ff..a09b566a70 100644 --- a/test/e2e-go/features/participation/onlineOfflineParticipation_test.go +++ b/test/e2e-go/features/participation/onlineOfflineParticipation_test.go @@ -177,11 +177,18 @@ func TestNewAccountCanGoOnlineAndParticipate(t *testing.T) { partKeyFirstValid := uint64(0) partKeyValidityPeriod := uint64(10000) partKeyLastValid := partKeyFirstValid + partKeyValidityPeriod + + maxTxnLife := consensus[protocol.ConsensusVersion("shortpartkeysprotocol")].MaxTxnLife + + if partKeyLastValid > maxTxnLife { + partKeyLastValid = maxTxnLife + } + partkeyResponse, _, err := client.GenParticipationKeys(newAccount, partKeyFirstValid, partKeyLastValid, 0) a.NoError(err, "rest client should be able to add participation key to new account") a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account") // account uses part key to go online - goOnlineTx, err := client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{}) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true) a.NoError(err, "should be able to make go online tx") a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account") onlineTxID, err := client.SignAndBroadcastTransaction(wh, nil, goOnlineTx) @@ -290,7 +297,8 @@ func TestAccountGoesOnlineForShortPeriod(t *testing.T) { a.NoError(err, "rest client should be able to add participation key to new account") a.Equal(newAccount, partkeyResponse.Parent.String(), "partkey response should echo queried account") // account uses part key to go online - goOnlineTx, err := 
client.MakeUnsignedGoOnlineTx(newAccount, &partkeyResponse, partKeyFirstValid, partKeyLastValid, transactionFee, [32]byte{}) + goOnlineTx, err := client.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, partKeyFirstValid, partKeyLastValid, [32]byte{}, true) + a.NoError(err) a.Equal(goOnlineTx.KeyregTxnFields.StateProofPK.IsEmpty(), false, "stateproof key should not be zero") a.NoError(err, "should be able to make go online tx") a.Equal(newAccount, goOnlineTx.Src().String(), "go online response should echo queried account") diff --git a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go index 18bdec3695..018cb60a4d 100644 --- a/test/e2e-go/features/participation/overlappingParticipationKeys_test.go +++ b/test/e2e-go/features/participation/overlappingParticipationKeys_test.go @@ -39,6 +39,17 @@ import ( "github.com/algorand/go-algorand/util/db" ) +// TestOverlappingParticipationKeys is a test that "overlaps" participation keys across +// various nodes. Keys are installed in a rotating fashion across the nodes where: +// ((Network Round - 1) Mod 10) = nodeIdx and nodeIdx is used to pull out from an +// "array" of nodes similar to {Node1, Node2, Node3} etc. The Mod 10 simply pulls the +// "digit" from the number: +// Round: 13 -> 13 - 1 = 12 -> 12 Mod 10 -> 2 -> Node3 with nodeIdx == 2 +// +// The keys are overlapped in the sense that a key is registered to a node and +// "overlaps" with other installed keys that are also valid. 
Meaning there might be: +// PKI 1 (Valid 3-15) and PKI 2 (Valid 13-25) and PKI 3 (Valid 23-35) all installed +// on the same node func TestOverlappingParticipationKeys(t *testing.T) { partitiontest.PartitionTest(t) defer fixtures.ShutdownSynchronizedTest(t) @@ -50,6 +61,7 @@ func TestOverlappingParticipationKeys(t *testing.T) { shortPartKeysProtocol := config.Consensus[protocol.ConsensusCurrentVersion] shortPartKeysProtocol.ApprovedUpgrades = map[protocol.ConsensusVersion]uint64{} // keys round = current - 2 * (2 * 1) (see selector.go) + // --> return r.SubSaturate(basics.Round(2 * cparams.SeedRefreshInterval * cparams.SeedLookback)) // new keys must exist at least 4 rounds prior use shortPartKeysProtocol.SeedLookback = 2 shortPartKeysProtocol.SeedRefreshInterval = 1 @@ -68,13 +80,6 @@ func TestOverlappingParticipationKeys(t *testing.T) { defer fixture.Shutdown() accountsNum := len(fixture.NodeDataDirs()) - for _, dataDir := range fixture.NodeDataDirs() { - cfg, err := config.LoadConfigFromDisk(dataDir) - a.NoError(err) - cfg.ParticipationKeysRefreshInterval = 500 * time.Millisecond - err = cfg.SaveToDisk(dataDir) - a.NoError(err) - } genesis, err := bookkeeping.LoadGenesisFromFile(filepath.Join(fixture.PrimaryDataDir(), "genesis.json")) a.NoError(err) @@ -89,10 +94,23 @@ func TestOverlappingParticipationKeys(t *testing.T) { continue } acctIdx := (round - 1) % 10 + + // Prepare the registration keys ahead of time. Note that the + 10 is because we use Mod 10 + + // These variables control when the transaction will be sent out to be valid from. + // These variables will also be the name of the file produced EXCEPT + // prepareParticipationKey() will add 2 to the txStartRound for the filename. 
+ // so the file for round 1 will be 3.15 + // For round 11 (the next round that Mod 10 will index to 1), that means the filename will be + // 13.25 which results in a 2 round overlap txStartRound := round txEndRound := txStartRound + 10 + 4 + // The registration variables here control when the participation key will actually be valid from + // For round 1, that means from 1-16 (one round of overlap) + // For round 11 (the next round that Mod 10 will index to 1), that means the 11-26 regStartRound := round regEndRound := regStartRound + 11 + 4 + err = prepareParticipationKey(a, &fixture, acctIdx, txStartRound, txEndRound, regStartRound, regEndRound, genesisHash, rootKeys, regTransactions, config.Consensus[protocol.ConsensusCurrentVersion]) a.NoError(err) } @@ -100,17 +118,39 @@ func TestOverlappingParticipationKeys(t *testing.T) { fixture.Start() currentRound := uint64(0) fixture.AlgodClient = fixture.GetAlgodClientForController(fixture.NC) + + // ******** IMPORTANT ******** + // It is CRITICAL that this for loop NOT BLOCK. + // This loop assumes that it stays current with the round of the network. + // Remember: this test is running while the network is advancing rounds in parallel + // If this test blocks for more than a couple seconds, then the network round count will have advanced + // farther than the current "currentRound" variable. 
This will mean that the "addParticipationKey" function + // will NOT install the participation key in time for the shortened SeedLookback variable resulting + // in a network stall and a test failure for { err := fixture.WaitForRoundWithTimeout(currentRound + 1) a.NoError(err) + + // A sanity check that makes sure that the round of the network is the same as our + // current round variable + sts, err := fixture.GetAlgodClientForController(fixture.NC).Status() + a.NoError(err, "the network stalled, see test comments and review node.log in each nodes data directory for details.") + a.Equal(sts.LastRound, currentRound+1) + currentRound++ if (currentRound-1)%10 < uint64(accountsNum) { acctIdx := (currentRound - 1) % 10 + + // We do a plus two because the filenames were stored with a plus 2 startRound := currentRound + 2 // +2 and -2 below to balance, start/end must match in part key file name endRound := startRound + 10 + 4 - 2 + regStartRound := currentRound regEndRound := regStartRound + 11 + 4 + // This cannot block! (See above) + // We pull the files from the disk according to their start round end round filenames + // and install them as well as send out a transaction pk, err := addParticipationKey(a, &fixture, acctIdx, startRound, endRound, regTransactions) a.NoError(err) t.Logf("[.] 
Round %d, Added reg key for node %d range [%d..%d] %s\n", currentRound, acctIdx, regStartRound, regEndRound, hex.EncodeToString(pk[:8])) @@ -128,17 +168,20 @@ func TestOverlappingParticipationKeys(t *testing.T) { func addParticipationKey(a *require.Assertions, fixture *fixtures.RestClientFixture, acctNum uint64, startRound, endRound uint64, regTransactions map[int]transactions.SignedTxn) (crypto.OneTimeSignatureVerifier, error) { dataDir := fixture.NodeDataDirs()[acctNum] nc := fixture.GetNodeControllerForDataDir(dataDir) - genesisDir, err := nc.GetGenesisDir() partKeyName := filepath.Join(dataDir, config.PartKeyFilename("Wallet", startRound, endRound)) - partKeyNameTarget := filepath.Join(genesisDir, config.PartKeyFilename("Wallet", startRound, endRound)) - // make the rename in the background to ensure it won't take too long. We have ~4 rounds to complete this. - go os.Rename(partKeyName, partKeyNameTarget) + // This function can take more than a couple seconds, we can't have this function block so + // we wrap it in a go routine + go func() { + clientController := fixture.GetLibGoalClientFromNodeController(nc) + _, err := clientController.AddParticipationKey(partKeyName) + a.NoError(err) + }() signedTxn := regTransactions[int(startRound-2)] a.NotEmpty(signedTxn.Sig) - _, err = fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn) + _, err := fixture.GetAlgodClientForController(nc).SendRawTransaction(signedTxn) a.NoError(err) return signedTxn.Txn.KeyregTxnFields.VotePK, err } diff --git a/test/e2e-go/features/participation/participationExpiration_test.go b/test/e2e-go/features/participation/participationExpiration_test.go index 126b5acf09..06b392856f 100644 --- a/test/e2e-go/features/participation/participationExpiration_test.go +++ b/test/e2e-go/features/participation/participationExpiration_test.go @@ -31,7 +31,7 @@ import ( "github.com/algorand/go-algorand/test/partitiontest" ) -func testExpirationAccounts(t *testing.T, fixture 
*fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string) { +func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, finalStatus basics.Status, protocolCheck string, includeStateProofs bool) { a := require.New(fixtures.SynchronizedTest(t)) pClient := fixture.GetLibGoalClientForNamedNode("Primary") @@ -84,7 +84,7 @@ func testExpirationAccounts(t *testing.T, fixture *fixtures.RestClientFixture, f a.Equal(sAccount, partkeyResponse.Parent.String()) // account uses part key to go online - goOnlineTx, err := sClient.MakeUnsignedGoOnlineTx(sAccount, &partkeyResponse, 0, 0, transactionFee, [32]byte{}) + goOnlineTx, err := sClient.MakeRegistrationTransactionWithGenesisID(partkeyResponse, transactionFee, 0, 0, [32]byte{}, includeStateProofs) a.NoError(err) a.Equal(sAccount, goOnlineTx.Src().String()) @@ -191,7 +191,7 @@ func TestParticipationAccountsExpirationFuture(t *testing.T) { fixture.Start() defer fixture.Shutdown() - testExpirationAccounts(t, &fixture, basics.Offline, "future") + testExpirationAccounts(t, &fixture, basics.Offline, "future", true) } // TestParticipationAccountsExpirationNonFuture tests that sending a transaction to an account with @@ -214,5 +214,5 @@ func TestParticipationAccountsExpirationNonFuture(t *testing.T) { fixture.Start() defer fixture.Shutdown() - testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29)) + testExpirationAccounts(t, &fixture, basics.Online, string(protocol.ConsensusV29), false) } diff --git a/test/e2e-go/features/transactions/onlineStatusChange_test.go b/test/e2e-go/features/transactions/onlineStatusChange_test.go index d12715beb7..be2ff60ff6 100644 --- a/test/e2e-go/features/transactions/onlineStatusChange_test.go +++ b/test/e2e-go/features/transactions/onlineStatusChange_test.go @@ -18,6 +18,8 @@ package transactions import ( "fmt" + "io/ioutil" + "os" "path/filepath" "testing" @@ -86,7 +88,7 @@ func testAccountsCanChangeOnlineState(t *testing.T, templatePath 
string) { a.NoError(err, "should be no errors when creating partkeys") a.Equal(initiallyOffline, partkeyResponse.Address().String(), "successful partkey creation should echo account") - goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) + goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(initiallyOffline, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) a.NoError(err, "should be able to make go online tx") wh, err := client.GetUnencryptedWalletHandle() a.NoError(err, "should be able to get unencrypted wallet handle") @@ -168,13 +170,20 @@ func TestCloseOnError(t *testing.T) { // get the current round for partkey creation _, curRound := fixture.GetBalanceAndRound(initiallyOnline) + tempDir, err := ioutil.TempDir(os.TempDir(), "test-close-on-error") + require.NoError(t, err) + defer os.RemoveAll(tempDir) + + var partkeyFile string + _, partkeyFile, err = client.GenParticipationKeysTo(initiallyOffline, 0, curRound+1000, 0, tempDir) + // make a participation key for initiallyOffline - _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0) + _, err = client.AddParticipationKey(partkeyFile) a.NoError(err) // check duplicate keys does not crash - _, _, err = client.GenParticipationKeys(initiallyOffline, 0, curRound+1000, 0) - errMsg := fmt.Sprintf("ParticipationKeys exist for the range 0 to %d", curRound+1000) - a.Equal(errMsg, err.Error()) + _, err = client.AddParticipationKey(partkeyFile) + a.Error(err) + a.Contains(err.Error(), "cannot register duplicate participation key") // check lastValid < firstValid does not crash _, _, err = client.GenParticipationKeys(initiallyOffline, curRound+1001, curRound+1000, 0) expected := fmt.Sprintf("FillDBWithParticipationKeys: firstValid %d is after lastValid %d", int(curRound+1001), int(curRound+1000)) diff --git a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go 
b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go index 77c11ccb47..064a615968 100644 --- a/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go +++ b/test/e2e-go/stress/transactions/createManyAndGoOnline_test.go @@ -128,7 +128,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { partkeyResponse, _, err := client.GenParticipationKeys(account, curRound-10, curRound+1000, 0) a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i) a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account") - goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) + goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) a.NoError(err, "should be able to make go online tx %v", i) wh, err := client.GetUnencryptedWalletHandle() a.NoError(err, "should be able to get unencrypted wallet handle") @@ -149,7 +149,7 @@ func TestManyAccountsCanGoOnline(t *testing.T) { a.NoError(err, "should be no errors when creating many partkeys, creation number %v", i) a.Equal(account, partkeyResponse.Address, "successful partkey creation should echo account") - goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, nil, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) + goOnlineUTx, err := client.MakeUnsignedGoOnlineTx(account, curRound, curRound+transactionValidityPeriod, transactionFee, [32]byte{}) a.NoError(err, "should be able to make go online tx %v", i) wh, err := client.GetUnencryptedWalletHandle() a.NoError(err, "should be able to get unencrypted wallet handle") diff --git a/test/heapwatch/bwstart.sh b/test/heapwatch/bwstart.sh index 3770136f70..a2fa8ef284 100644 --- a/test/heapwatch/bwstart.sh +++ b/test/heapwatch/bwstart.sh @@ -35,10 +35,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --no-he 
echo "$!" > .heapWatch.pid # TODO: other pingpong modes -pingpong run -d "${TESTDIR}/node1" --tps 20 --rest 0 --run 0 & +pingpong run -d "${TESTDIR}/node1" --tps 20 --run 0 & echo "$!" > .pingpong1.pid -pingpong run -d "${TESTDIR}/node2" --tps 20 --rest 0 --run 0 & +pingpong run -d "${TESTDIR}/node2" --tps 20 --run 0 & echo "$!" > .pingpong2.pid diff --git a/test/heapwatch/start.sh b/test/heapwatch/start.sh index cb4b37eca5..82560f1185 100755 --- a/test/heapwatch/start.sh +++ b/test/heapwatch/start.sh @@ -25,10 +25,10 @@ python3 "${REPO_ROOT}/test/heapwatch/heapWatch.py" -o "${TESTDIR}/heaps" --perio echo "$!" > .heapWatch.pid # TODO: other pingpong modes -pingpong run -d "${TESTDIR}/Node1" --tps 10 --rest 0 --run 0 --nftasapersecond 200 & +pingpong run -d "${TESTDIR}/Node1" --tps 10 --run 0 --nftasapersecond 200 & echo "$!" > .pingpong1.pid -pingpong run -d "${TESTDIR}/Node2" --tps 10 --rest 0 --run 0 --nftasapersecond 200 & +pingpong run -d "${TESTDIR}/Node2" --tps 10 --run 0 --nftasapersecond 200 & echo "$!" > .pingpong2.pid diff --git a/test/scripts/e2e_subs/goal-partkey-commands.sh b/test/scripts/e2e_subs/goal-partkey-commands.sh new file mode 100755 index 0000000000..94c831c86e --- /dev/null +++ b/test/scripts/e2e_subs/goal-partkey-commands.sh @@ -0,0 +1,100 @@ +#!/usr/bin/env bash +# TIMEOUT=300 + +# errors are handled manually, so no -e +set -x + +date "+$0 start %Y%m%d_%H%M%S" + +# Registered Account ParticipationID Last Used First round Last round +# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000 +OUTPUT=$(goal account listpartkeys) +# In case there are multiple keys, make sure we are checking the correct one. 
+OUTPUT=$(echo "$OUTPUT"|grep "yes.*3000"|tr -s ' ') +if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi +if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi +if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi +if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi + +#Dumping participation key info from /tmp/tmpwtomya9x/net/Node... +# +#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ +#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ +#Last vote round: 3 +#Last block proposal round: 4 +#Effective first round: 0 +#Effective last round: 3000 +#First round: 0 +#Last round: 3000 +#Key dilution: 10000 +#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo= +#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4= +OUTPUT=$(goal account partkeyinfo) +if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi +if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi +if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi +# 100 or 10000 due to arm64 bug +if ! echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi +if ! 
echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi + +# Test that multiple data directories are supported +NUM_OUTPUT_1=$(echo "$OUTPUT"|grep -c 'Participation ID') +OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2") +NUM_OUTPUT_2=$(echo "$OUTPUT"|grep -c 'Participation ID') +if (( "$NUM_OUTPUT_2" <= "$NUM_OUTPUT_1" )); then echo "Should have found more participation keys when checking both data directories."; exit 1; fi + +# get stderr from this one +OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1) +EXPECTED_ERR="Only one data directory can be specified for this command." +if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi + +create_and_fund_account () { + local TEMP_ACCT=$(${gcmd} account new|awk '{ print $6 }') + ${gcmd} clerk send -f "$INITIAL_ACCOUNT" -t "$TEMP_ACCT" -a 1000000 > /dev/null + echo "$TEMP_ACCT" +} + +# given key should be installed and have the expected yes/no state +# $1 - yes or no +# $2 - a participation id +# $3 - error message +verify_registered_state () { + # look for participation ID anywhere in the partkeyinfo output + if ! goal account partkeyinfo | grep -q "$2"; then + fail_test "Key was not installed properly: $3" + fi + + # looking for yes/no, and the 8 character head of participation id in this line: + # yes LFMT...RHJQ 4UPT6AQC... 4 0 3000 + if !
goal account listpartkeys | grep -q "$1.*$(echo "$2" | cut -c1-8)\.\.\."; then + fail_test "Unexpected key state: $3" + fi +} + +# goal account installpartkey +# install manually generated participation keys (do not register) +NEW_ACCOUNT_1=$(create_and_fund_account) +algokey part generate --keyfile test_partkey --first 0 --last 3000 --parent "$NEW_ACCOUNT_1" +PARTICIPATION_ID_1=$(goal account installpartkey --delete-input --partkey test_partkey|awk '{ print $7 }') +verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey" + +# goal account addpartkey +# generate and install participation keys (do not register) +NEW_ACCOUNT_2=$(create_and_fund_account) +PARTICIPATION_ID_2=$(goal account addpartkey -a "$NEW_ACCOUNT_2" --roundFirstValid 0 --roundLastValid 3000|awk '{ print $7 }') +verify_registered_state "no" "$PARTICIPATION_ID_2" "goal account addpartkey" + +# goal account renewpartkeys +# generate, install, and register +NEW_ACCOUNT_3=$(create_and_fund_account) +PARTICIPATION_ID_3=$(${gcmd} account renewpartkey --roundLastValid 3000 -a "$NEW_ACCOUNT_3"|tail -n 1|awk '{ print $7 }') +verify_registered_state "yes" "$PARTICIPATION_ID_3" "goal account renewpartkey" + +# goal account changeonlinestatus (--account) +verify_registered_state "no" "$PARTICIPATION_ID_1" "goal account installpartkey (before)" +${gcmd} account changeonlinestatus -a "$NEW_ACCOUNT_1" +verify_registered_state "yes" "$PARTICIPATION_ID_1" "goal account installpartkey (after)" + +# goal account renewallpartkeys +# goal account changeonlinestatus (--partkey) +# These do not work as I expected them to. Do they work? I don't know, we should try to remove them.
diff --git a/test/scripts/e2e_subs/goal-partkey-information.sh b/test/scripts/e2e_subs/goal-partkey-information.sh deleted file mode 100755 index 6d5069c554..0000000000 --- a/test/scripts/e2e_subs/goal-partkey-information.sh +++ /dev/null @@ -1,47 +0,0 @@ -#!/usr/bin/env bash -# TIMEOUT=300 - -# errors are handled manually, so no -e -set -x - -date "+$0 start %Y%m%d_%H%M%S" - -# Registered Account ParticipationID Last Used First round Last round -# yes LFMT...RHJQ 4UPT6AQC... 4 0 3000000 -OUTPUT=$(goal account listpartkeys) -OUTPUT=$(echo "$OUTPUT"|tail -n 1|tr -s ' ') -if [[ "$OUTPUT" != yes* ]]; then echo "Registered should be 'yes' but wasn't."; exit 1; fi -if [[ $(echo "$OUTPUT" | cut -d' ' -f 4) == 0 ]]; then echo "Last Used shouldn't be 0 but was."; exit 1; fi -if [[ $(echo "$OUTPUT" | cut -d' ' -f 5) != 0 ]]; then echo "First round should be 0 but wasn't."; exit 1; fi -if [[ $(echo "$OUTPUT" | cut -d' ' -f 6) != 3000 ]]; then echo "Last round should be 3000 but wasn't."; exit 1; fi - -#Dumping participation key info from /tmp/tmpwtomya9x/net/Node... -# -#Participation ID: 4UPT6AQCFZU5ZDN3WKVPCFYOH2SFJ7SPHK7XPWI2CIDYKK7K3WMQ -#Parent address: LFMTCXCY6WGSFSGLSNTFH532KVERJVNRD7W5H7GIQ4MPGM7SSVYMQYRHJQ -#Last vote round: 3 -#Last block proposal round: 4 -#Effective first round: 0 -#Effective last round: 3000000 -#First round: 0 -#Last round: 3000000 -#Key dilution: 10000 -#Selection key: esIsBJB86P+sLeqO3gVoLBGfpuwYlWN4lNzz2AYslTo= -#Voting key: W1OcXLZsaATyOd5FbhRgXHmcywvn++xEVUAQ0NejmW4= -OUTPUT=$(goal account partkeyinfo) -if ! echo "$OUTPUT" | grep -q 'First round:[[:space:]]* 0'; then echo "First round should have been 0."; exit 1; fi -if ! echo "$OUTPUT" | grep -q 'Last round:[[:space:]]* 3000'; then echo "Last round should have been 3000."; exit 1; fi -if ! echo "$OUTPUT" | grep -q 'Effective last round:[[:space:]]* 3000'; then echo "Effective last round should have been 3000."; exit 1; fi -# 100 or 10000 due to arm64 bug -if ! 
echo "$OUTPUT" | grep -q 'Key dilution:[[:space:]]* 100\(00\)\?'; then echo "Key dilution should have been 10000."; exit 1; fi -if ! echo "$OUTPUT" | grep -q 'Participation ID:[[:space:]]*[[:alnum:]]\{52\}'; then echo "There should be a participation ID."; exit 1; fi - -# Test multiple data directory supported -OUTPUT=$(goal account partkeyinfo -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2") -OUTPUT=$(echo "$OUTPUT"|grep -c 'Participation ID') -if [[ "$OUTPUT" != "2" ]]; then echo "Two Participation IDs should have been found."; exit 1; fi - -# get stderr from this one -OUTPUT=$(goal account listpartkeys -d "$ALGORAND_DATA" -d "$ALGORAND_DATA2" 2>&1) -EXPECTED_ERR="Only one data directory can be specified for this command." -if [[ "$OUTPUT" != "$EXPECTED_ERR" ]]; then echo -e "Unexpected output from multiple data directories with 'listpartkeys': \n$OUTPUT"; exit 1; fi diff --git a/test/testdata/deployednettemplates/recipes/alphanet/Makefile b/test/testdata/deployednettemplates/recipes/alphanet/Makefile new file mode 100644 index 0000000000..13130934de --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/Makefile @@ -0,0 +1,15 @@ +PARAMS=-w 8 -R 1 -N 4 -n 8 -H 2 --node-template node.json --relay-template relay.json --non-participating-node-template nonPartNode.json + +all: topology.json net.json genesis.json + +topology.json: gen_topology.py + python gen_topology.py + +net.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile + netgoal generate -t net -r /tmp/wat -o net.json ${PARAMS} + +genesis.json: node.json relay.json nonPartNode.json ${GOPATH}/bin/netgoal Makefile + netgoal generate -t genesis -r /tmp/wat -o genesis.json ${PARAMS} + +clean: + rm -f net.json genesis.json topology.json diff --git a/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py new file mode 100644 index 0000000000..7298256d8a --- /dev/null +++ 
b/test/testdata/deployednettemplates/recipes/alphanet/gen_topology.py @@ -0,0 +1,32 @@ +import json +import os + +node_types = {"R":1, "N":4, "NPN":2} +node_size = {"R":"-m5d.4xl", "N":"-m5d.4xl", "NPN":"-m5d.4xl"} +regions = [ + "AWS-US-EAST-2", + "AWS-US-WEST-2", + "AWS-EU-CENTRAL-1", + "AWS-EU-WEST-2", + "AWS-AP-SOUTHEAST-1", + "AWS-AP-SOUTHEAST-2" +] + +network = "alphanet" + +host_elements = [] +region_count = len(regions) +for node_type in node_types.keys(): + node_count = node_types[node_type] + region_size = node_size[node_type] + for i in range(node_count): + host = {} + node_name = node_type + str(i + 1) + "-" + network + region = regions[i % region_count] + host["Name"] = node_name + host["Template"] = region + region_size + host_elements.append(host) + +ec2_hosts = {"Hosts": host_elements} +with open("topology.json", "w") as f: + f.write(json.dumps(ec2_hosts, indent = 2) + os.linesep) diff --git a/test/testdata/deployednettemplates/recipes/alphanet/genesis.json b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json new file mode 100644 index 0000000000..1d78dd7821 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/genesis.json @@ -0,0 +1,64 @@ +{ + "NetworkName": "alphanet", + "VersionModifier": "", + "ConsensusProtocol": "alpha1", + "FirstPartKeyRound": 0, + "LastPartKeyRound": 3000000, + "PartKeyDilution": 0, + "Wallets": [ + { + "Name": "Wallet1", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet2", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet3", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet4", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet5", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet6", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet7", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet8", + "Stake": 6.25, + "Online": true + }, + { + "Name": "Wallet9", + "Stake": 25, + "Online": false + }, + { + "Name": "Wallet10", + 
"Stake": 25, + "Online": false + } + ], + "FeeSink": "OOZZ32IHB6SS6ZTARKJ2PQP3QKE7R3IWQTOPXRGLTAGPVCDS3FHJOEOYVM", + "RewardsPool": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAY5HFKQ", + "DevMode": false, + "Comment": "" +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/net.json b/test/testdata/deployednettemplates/recipes/alphanet/net.json new file mode 100644 index 0000000000..e75a91d293 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/net.json @@ -0,0 +1,232 @@ +{ + "Hosts": [ + { + "Name": "R1-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "relay1", + "Wallets": null, + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" + } + ] + }, + { + "Name": "N1-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "node1", + "Wallets": [ + { + "Name": "Wallet1", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, 
\"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + }, + { + "Name": "node5", + "Wallets": [ + { + "Name": "Wallet2", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + } + ] + }, + { + "Name": "N2-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "node2", + "Wallets": [ + { + "Name": "Wallet3", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + }, + { + "Name": "node6", + "Wallets": [ + { + "Name": "Wallet4", + "ParticipationOnly": false + 
} + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + } + ] + }, + { + "Name": "N3-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "node3", + "Wallets": [ + { + "Name": "Wallet5", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + }, + { + "Name": "node7", + "Wallets": [ + { + "Name": "Wallet6", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, 
\"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + } + ] + }, + { + "Name": "N4-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "node4", + "Wallets": [ + { + "Name": "Wallet7", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + }, + { + "Name": "node8", + "Wallets": [ + { + "Name": "Wallet8", + "ParticipationOnly": false + } + ], + "APIToken": "{{APIToken}}", + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "EnableService": false, + "EnableBlockStats": true, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" + } + ] + }, + { + "Name": 
"NPN1-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode1", + "Wallets": [ + { + "Name": "Wallet9", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + }, + { + "Name": "NPN2-alphanet", + "Group": "", + "Nodes": [ + { + "Name": "nonParticipatingNode2", + "Wallets": [ + { + "Name": "Wallet10", + "ParticipationOnly": false + } + ], + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableTelemetry": false, + "EnableMetrics": false, + "EnableService": false, + "EnableBlockStats": false, + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" + } + ] + } + ] +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/node.json b/test/testdata/deployednettemplates/recipes/alphanet/node.json new file mode 100644 index 0000000000..d3b429ee32 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/node.json @@ -0,0 +1,10 @@ +{ + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, \"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, 
\"CadaverSizeTarget\": 0, \"EnableAccountUpdatesStats\": true, \"EnableRuntimeMetrics\": true}" +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json new file mode 100644 index 0000000000..5b0a52d9d9 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/nonPartNode.json @@ -0,0 +1,5 @@ +{ + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"BaseLoggerDebugLevel\": 4, \"CadaverSizeTarget\": 0 }" +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/recipe.json b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json new file mode 100644 index 0000000000..a2f88f63b4 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/recipe.json @@ -0,0 +1,7 @@ +{ + "GenesisFile":"genesis.json", + "NetworkFile":"net.json", + "ConfigFile": "../../configs/reference.json", + "HostTemplatesFile": "../../hosttemplates/hosttemplates.json", + "TopologyFile": "topology.json" +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/relay.json b/test/testdata/deployednettemplates/recipes/alphanet/relay.json new file mode 100644 index 0000000000..db8fb939d8 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/relay.json @@ -0,0 +1,11 @@ +{ + "NetAddress": "{{NetworkPort}}", + "APIEndpoint": "{{APIEndpoint}}", + "APIToken": "{{APIToken}}", + "EnableBlockStats": true, + "EnableTelemetry": true, + "TelemetryURI": "{{TelemetryURI}}", + "EnableMetrics": true, + "MetricsURI": "{{MetricsURI}}", + "ConfigJSONOverride": "{ \"TxPoolExponentialIncreaseFactor\": 1, \"DNSBootstrapID\": \".algodev.network\", \"DeadlockDetection\": -1, \"EnableIncomingMessageFilter\": true, \"CadaverSizeTarget\": 0, \"PeerPingPeriodSeconds\": 30, \"EnableAgreementReporting\": true, 
\"EnableAgreementTimeMetrics\": true, \"EnableAssembleStats\": true, \"EnableProcessBlockStats\": true, \"BaseLoggerDebugLevel\": 4, \"EnableProfiler\": true, \"EnableAccountUpdatesStats\": true}" +} diff --git a/test/testdata/deployednettemplates/recipes/alphanet/topology.json b/test/testdata/deployednettemplates/recipes/alphanet/topology.json new file mode 100644 index 0000000000..8760eae203 --- /dev/null +++ b/test/testdata/deployednettemplates/recipes/alphanet/topology.json @@ -0,0 +1,32 @@ +{ + "Hosts": [ + { + "Name": "R1-alphanet", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N1-alphanet", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "N2-alphanet", + "Template": "AWS-US-WEST-2-m5d.4xl" + }, + { + "Name": "N3-alphanet", + "Template": "AWS-EU-CENTRAL-1-m5d.4xl" + }, + { + "Name": "N4-alphanet", + "Template": "AWS-EU-WEST-2-m5d.4xl" + }, + { + "Name": "NPN1-alphanet", + "Template": "AWS-US-EAST-2-m5d.4xl" + }, + { + "Name": "NPN2-alphanet", + "Template": "AWS-US-WEST-2-m5d.4xl" + } + ] +}